/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
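
/*
 * Note: the expression above is the classic hand-rolled offsetof()
 * idiom; it evaluates to the byte offset of the 'un' member within
 * struct lpfc_sli_ct_request, i.e. the size of the CT header that
 * precedes the payload union. offsetof(struct lpfc_sli_ct_request, un)
 * would yield the same value.
 */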

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
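
/*
 * Worked example of the sizing above (assuming LPFC_BPL_SIZE is 1024
 * bytes): a request of 2500 bytes rounds up to three DMA buffers and
 * therefore consumes three BPL entries with bdeSize values of 1024,
 * 1024 and 452; a caller passing fewer than three available entries
 * in *bpl_entries gets NULL back from the size check.
 */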

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct fc_bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
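
/*
 * Note on the copy loop above: SG_MITER_ATOMIC makes sg_miter_next()
 * map each scatterlist page with an atomic kmap, which is why the
 * loop runs with local interrupts disabled; the mapping must not be
 * preempted while it is in use.
 */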

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. It is called by the
 * ring event handler function without any lock held. It can be
 * called from both worker thread context and interrupt context, and
 * also from another thread which cleans up the SLI layer objects.
 * It copies the response payload into the bsg job's reply buffers,
 * releases the resources held for the command, and completes the job
 * if the job has not already been aborted.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
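
	/*
	 * Layout of the single BPL page set up below: the first
	 * request_nseg entries describe the outbound command buffers
	 * and the remaining entries describe the inbound response
	 * buffers.
	 */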
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (iocb_stat == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

	/* iocb failed so cleanup */

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
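
/*
 * Job lifetime note (applies to the send/complete pair above): the
 * completion handler clears dd_data->set_job under ct_ev_lock, and
 * the bsg timeout path is expected to clear job->dd_data under the
 * same lock, so exactly one of the two paths completes the job.
 */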

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. It is called by the
 * ring event handler function without any lock held. It can be
 * called from both worker thread context and interrupt context, and
 * also from another thread which cleans up the SLI layer objects.
 * It copies the ELS response payload (or the LS_RJT reject data)
 * into the bsg job's reply buffers, releases the resources held for
 * the command, and completes the job if the job has not already been
 * aborted.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &job->reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}
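
/*
 * Note on the LS_RJT decode above: word 4 of the response iocb holds
 * the reject payload, and the handler above addresses it as bytes,
 * taking vendor unique from [0], reason explanation from [1], reason
 * code from [2] and action from [3] to fill the fields of
 * fc_bsg_ctels_reply.
 */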

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* Use the DMA buffers allocated by lpfc_prep_els_iocb for the
	 * command and response so that, if the job times out and the
	 * request is freed, we do not DMA into memory that is no longer
	 * allocated to the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (rc == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

linkdown_err:

	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
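
/*
 * Event object lifetime (sketch of the kref scheme used below):
 * lpfc_bsg_event_new() initializes the kref to one; every waiter or
 * unsolicited-event path takes an extra reference with
 * lpfc_bsg_event_ref(), and the final lpfc_bsg_event_unref() drops
 * into lpfc_bsg_event_free(), which unlinks the event and discards
 * any queued event_data entries.
 */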

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the received iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
						    )) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort toward the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command toward the management plane is
 * present, it clears that context and returns 1 for handled; otherwise,
 * it returns 0 indicating no context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}
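
/*
 * The ct_ctx[] array referenced above acts as a small ring of
 * LPFC_CT_CTX_MAX slots: lpfc_bsg_ct_unsol_event() parks the
 * oxid/rxid/SID of an unsolicited CT exchange in the next slot, and
 * the entry stays valid until lpfc_issue_ct_rsp() answers on that
 * exchange or the abort handler above invalidates it.
 */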

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
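
/*
 * Usage sketch (hypothetical application flow, not driver code): a
 * management application first registers with the SET_CT_EVENT vendor
 * command, supplying an ev_reg_id/ev_req_id pair, and then polls with
 * GET_CT_EVENT using the same ev_reg_id; a GET_CT_EVENT result of
 * -ENOENT indicates that no event data is currently queued.
 */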

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. It is called by the
 * ring event handler function without any lock held. It can be
 * called from both worker thread context and interrupt context, and
 * also from another thread which cleans up the SLI layer objects.
 * It sets the job's completion status from the response iocb,
 * releases the resources held for the response, and completes the
 * job if the job has not already been aborted.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			job->reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a dma buffer descriptor list for the response payload.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
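
/*
 * Diagnostic mode bracketing: each loopback path below calls
 * lpfc_bsg_diag_mode_enter() before disturbing the link and must
 * call lpfc_bsg_diag_mode_exit() on every exit path so that SCSI
 * requests blocked for the test are unblocked again.
 */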

/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag
 * loopback on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	pring = &psli->ring[LPFC_FCP_RING];
	if (!pring)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (!list_empty(&pring->txcmplq)) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;
		msleep(10);
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing when
 * leaving diag loopback mode on the device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. Once the link
 * is placed in loopback mode, scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
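
/*
 * Timeout bookkeeping used by the wait loops above and below:
 * loopback_mode->timeout is given in seconds, scaled by 100, and
 * compared against a counter that advances once per 10 ms msleep(),
 * so a requested timeout of N seconds expires after N * 100
 * iterations.
 */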

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for setting the link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}

/**
 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up internal loopback diagnostic.
 */
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, the vfi, and also the vpi.
 */
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	int rc;

	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	rc = lpfc_issue_reg_vfi(phba->pport);
	return rc;
}
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout;
	int i, rc = 0;

	/* no data to return, just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	if (link_flags == INTERNAL_LOOP_BACK)
		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
	else if (link_flags == EXTERNAL_LOOP_BACK)
		rc = lpfc_hba_init_link_fc_topology(phba,
						    FLAGS_TOPOLOGY_MODE_PT_PT,
						    MBX_NOWAIT);
	else {
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port to be ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for checking and dispatching a bsg diag
 * command from the user to the proper driver action routine.
 */
static int
lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	int rc;

	shost = job->shost;
	if (!shost)
		return -ENODEV;
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_IF_TYPE_2)
		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
	else
		rc = -ENODEV;

	return rc;
}

/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function is responsible for checking and dispatching a bsg diag
 * command from the user to the proper driver action routine.
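 *
 * Return: (summarized from the body below) 0 when the diagnostic state is
 * cleared and the port reset succeeds, -ENODEV on an unsupported SLI
 * revision or interface type, otherwise a negative error code.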
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = job->shost;
	if (!shost)
		return -ENODEV;
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function performs an SLI4 diag link test request from the user
 * application.
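 *
 * Return: (summarized from the body below) 0 on success; a negative error
 * code when the sanity checks, the diagnostic state transitions, or the
 * mailbox command fail.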
 */
static int
lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmboxq;
	struct sli4_link_diag *link_diag_test_cmd;
	uint32_t req_len, alloc_len;
	uint32_t timeout;
	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct diag_status *diag_status_reply;
	int mbxstatus, rc = 0;

	shost = job->shost;
	if (!shost) {
		rc = -ENODEV;
		goto job_error;
	}
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport) {
		rc = -ENODEV;
		goto job_error;
	}
	phba = vport->phba;
	if (!phba) {
		rc = -ENODEV;
		goto job_error;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = -ENODEV;
		goto job_error;
	}
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = -ENODEV;
		goto job_error;
	}

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct sli4_link_diag)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3013 Received LINK DIAG TEST request "
				"size:%d below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct sli4_link_diag)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	link_diag_test_cmd = (struct sli4_link_diag *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	timeout = link_diag_test_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);

	if (rc)
		goto job_error;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto link_diag_test_exit;
	}

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_test_exit;
	}
	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_id);
	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
	       link_diag_test_cmd->loops);
	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_version);
	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
	       link_diag_test_cmd->error_action);

	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || mbxstatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3010 Run link diag test mailbox failed with "
				"mbx_status x%x status x%x, add_status x%x\n",
				mbxstatus,
				shdr_status, shdr_add_status);
	}

	diag_status_reply = (struct diag_status *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3012 Received Run link diag test reply "
				"below minimum size (%d): reply_len:%d\n",
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_status)),
				job->reply_len);
		rc = -EINVAL;
		goto job_error;
	}

	diag_status_reply->mbox_status = mbxstatus;
	diag_status_reply->shdr_status = shdr_status;
	diag_status_reply->shdr_add_status = shdr_add_status;

link_diag_test_exit:
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);

	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				      (uint8_t *)&phba->pport->fc_sparam,
				      mbox, *rpi);
	else {
		*rpi = lpfc_sli4_alloc_rpi(phba);
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				      phba->pport->fc_myDID,
				      (uint8_t *)&phba->pport->fc_sparam,
				      mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *)mbox->context1;
	mbox->context1 = NULL;
	mbox->context2 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
				 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
							 sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
							sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
	    dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl =
		FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq,
					     (phba->fc_ratov * 2)
					     + LPFC_DRVR_TIMEOUT);
	if (iocb_stat) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object
 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
 * returns the pointer to the buffer.
 **/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;
	struct pci_dev *pcidev = phba->pcidev;

	/* allocate dma buffer struct */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	INIT_LIST_HEAD(&dmabuf->list);

	/* now, allocate dma buffer */
	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
					  &(dmabuf->phys), GFP_KERNEL);

	if (!dmabuf->virt) {
		kfree(dmabuf);
		return NULL;
	}
	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);

	return dmabuf;
}

/**
 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
 *
 * This routine simply frees a dma buffer and its associated buffer
 * descriptor referred to by @dmabuf.
 **/
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
	struct pci_dev *pcidev = phba->pcidev;

	if (!dmabuf)
		return;

	if (dmabuf->virt)
		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
				  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

/**
 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
 * @phba: Pointer to HBA context object.
 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
 *
 * This routine simply frees all the dma buffers and their associated buffer
 * descriptors referred to by @dmabuf_list.
 **/
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;

	if (list_empty(dmabuf_list))
		return;

	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
		list_del_init(&dmabuf->list);
		lpfc_bsg_dma_page_free(phba, dmabuf);
	}
	return;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to copy user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed, the user data pointed to with indataptr is copied into the
 * kernel memory. The chained list of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		    struct ulp_bde64 *bpl, uint32_t size,
		    int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	mlist->flag = i;
	return mlist;
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}

/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
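 *
 * Return: (summarized from the body below) 0 on success, -ENOMEM when
 * buffer or iocb allocation fails, -EIO when posting a receive buffer
 * iocb fails.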
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
				     size_t len)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *)rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba,
				       cmdiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port; the link must be up and in loopback mode prior
 * to being called.
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback,
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	IOCB_t *cmd, *rsp = NULL;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned, return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}
	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *)txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	if (phba->sli_rev < LPFC_SLI_REV4)
		rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
		       ptr + current_offset,
		       segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
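	/*
	 * WORD5 above and below describes the FC frame header for the
	 * sequence: an unsolicited control R_CTL with CT type, so the
	 * transmitted sequence comes back to this same port as an
	 * unsolicited CT command that the loopback event handler catches.
	 */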
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		cmd->ulpContext = txxri;
	} else {
		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
		cmdiocbq->context3 = txbmp;
		cmdiocbq->sli4_xritag = NO_XRI;
		cmd->unsli3.rcvsli3.ox_id = 0xffff;
	}
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
					    (rsp->ulpStatus != IOCB_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
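 *
 * Note: as implemented below, this handler also releases the mailbox
 * memory, the dma page buffers, and the bsg tracking structure before
 * completing the job, so none of them may be referenced afterwards.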
 **/
void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	dd_data = pmboxq->context1;

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get the header part from the mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if the job has been aborted */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort the job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = 0;
		job->job_done(job);
	}
	return;
}

/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
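 *
 * Return: 0 when the mailbox command is permitted, -EPERM when it is
 * rejected (an offline-only command on an on-line port, or an unsupported
 * command code).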
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
				     MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2743 Command 0x%x is illegal in on-line "
					"state\n",
					mb->mbxCommand);
			return -EPERM;
		}
		/* fall through - commands allowed on-line follow */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1226 mbox: set_variable 0x%x, 0x%x\n",
				mb->un.varWords[0],
				mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
		    && (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_READ_TOPOLOGY:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2742 Unknown Command 0x%x\n",
				mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
 * @phba: Pointer to HBA context object.
 *
 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
 * command session.
 **/
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
		return;

	/* free all memory, including dma buffers */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
	/* multi-buffer write mailbox command pass-through complete */
	memset((char *)&phba->mbox_ext_buf_ctx, 0,
	       sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	return;
}

/**
 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This routine handles the BSG job for mailbox command completions with
 * multiple external buffers.
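 *
 * Return: the fc_bsg_job pointer when the job is still active, or NULL
 * when the job has already been aborted, in which case only the driver
 * side context is cleaned up here (summarized from the body below).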
 **/
static struct fc_bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	dd_data = pmboxq->context1;

	/* Determine if the job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort the job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get the header part from the mailboxq structure.
	 */

	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data, copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		job->reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}

/**
 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox read commands with
 * multiple external buffers.
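 *
 * Note: the multi-buffer session context is reset here only when the
 * mailbox completed with an error or when the session has a single
 * external buffer; otherwise the session stays open for further reads
 * (summarized from the body below).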
 **/
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct fc_bsg_job *job;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2939 SLI_CONFIG ext-buffer rd mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
		lpfc_bsg_mbox_ext_session_reset(phba);

	/* free base driver mailbox structure memory */
	mempool_free(pmboxq, phba->mbox_mem_pool);

	/* if the job is still active, call job done */
	if (job)
		job->job_done(job);

	return;
}

/**
 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox write commands with
 * multiple external buffers.
 **/
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct fc_bsg_job *job;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with the mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2940 SLI_CONFIG ext-buffer wr mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	/* free all memory, including dma buffers */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_mbox_ext_session_reset(phba);

	/* if the job is still active, call job done */
	if (job)
		job->job_done(job);

	return;
}

/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up a dma descriptor for an ext buffer
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @index: Index of the external buffer descriptor to fill in.
 * @mbx_dmabuf: Pointer to the mailbox dma buffer (descriptor 0 points here).
 * @ext_dmabuf: Pointer to the external dma buffer for non-zero indexes.
 **/
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
				struct lpfc_dmabuf *ext_dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2943 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		} else {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
						   &sli_cfg_mbx->un.
						   sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);

		} else {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
						   &sli_cfg_mbx->un.
						   sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}

/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
 * with non-embedded external buffers.
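 *
 * Return: SLI_CONFIG_HANDLED when the mailbox command has been issued
 * successfully, otherwise a negative error code (summarized from the
 * body below).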
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject a non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of the external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
							ext_buf_index, dmabuf,
							curr_dmabuf);
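			/* step to the descriptor slot of the next buffer */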
ext_buf_index++; 3838 } 3839 } 3840 3841 /* after dma descriptor setup */ 3842 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3843 sta_pos_addr, dmabuf, ext_buf_cnt); 3844 3845 /* construct base driver mbox command */ 3846 pmb = &pmboxq->u.mb; 3847 pmbx = (uint8_t *)dmabuf->virt; 3848 memcpy(pmb, pmbx, sizeof(*pmb)); 3849 pmb->mbxOwner = OWN_HOST; 3850 pmboxq->vport = phba->pport; 3851 3852 /* multi-buffer handling context */ 3853 phba->mbox_ext_buf_ctx.nembType = nemb_tp; 3854 phba->mbox_ext_buf_ctx.mboxType = mbox_rd; 3855 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; 3856 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; 3857 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; 3858 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; 3859 3860 /* callback for multi-buffer read mailbox command */ 3861 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; 3862 3863 /* context fields to callback function */ 3864 pmboxq->context1 = dd_data; 3865 dd_data->type = TYPE_MBOX; 3866 dd_data->set_job = job; 3867 dd_data->context_un.mbox.pmboxq = pmboxq; 3868 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 3869 job->dd_data = dd_data; 3870 3871 /* state change */ 3872 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 3873 3874 /* 3875 * Non-embedded mailbox subcommand data gets byte swapped here because 3876 * the lower level driver code only does the first 64 mailbox words. 3877 */ 3878 if ((!bsg_bf_get(lpfc_mbox_hdr_emb, 3879 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && 3880 (nemb_tp == nemb_mse)) 3881 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3882 &pmbx[sizeof(MAILBOX_t)], 3883 sli_cfg_mbx->un.sli_config_emb0_subsys. 3884 mse[0].buf_len); 3885 3886 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3887 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3888 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3889 "2947 Issued SLI_CONFIG ext-buffer " 3890 "maibox command, rc:x%x\n", rc); 3891 return SLI_CONFIG_HANDLED; 3892 } 3893 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3894 "2948 Failed to issue SLI_CONFIG ext-buffer " 3895 "maibox command, rc:x%x\n", rc); 3896 rc = -EPIPE; 3897 3898 job_error: 3899 if (pmboxq) 3900 mempool_free(pmboxq, phba->mbox_mem_pool); 3901 lpfc_bsg_dma_page_list_free(phba, 3902 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3903 kfree(dd_data); 3904 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 3905 return rc; 3906 } 3907 3908 /** 3909 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write 3910 * @phba: Pointer to HBA context object. 3911 * @mb: Pointer to a BSG mailbox object. 3912 * @dmabuff: Pointer to a DMA buffer descriptor. 3913 * 3914 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with 3915 * non-embedded external bufffers. 
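/*
 * From user space, a non-embedded read is a short session of BSG jobs
 * rather than a single call. A rough sketch, assuming a hypothetical
 * helper issue_mbox_job() that submits one LPFC_BSG_VENDOR_MBOX job
 * carrying the given dfc_mbox_req fields:
 *
 *	req.extMboxTag = tag;			(non-zero session tag)
 *	req.extSeqNum = 1;
 *	issue_mbox_job(&req, sli_config_mbox);	(the mailbox itself)
 *	for (seq = 2; seq <= ext_buf_cnt; seq++) {
 *		req.extSeqNum = seq;
 *		issue_mbox_job(&req, NULL);	(pulls one ext buffer)
 *	}
 *
 * The first job returns as soon as the mailbox is issued to the port;
 * each following job drains one completed external buffer, in order.
 */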
/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Type of the non-embedded mailbox command (MSE or HBD).
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log the buffer lengths expected for the rest of the session */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}
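/*
 * The write side is the mirror image of the read path above: the first
 * job carries the SLI_CONFIG mailbox with external buffer 0 embedded in
 * the same DMA page, and each following job delivers one more external
 * buffer. A condensed sketch of the session shape, using the same
 * hypothetical user-space fields as the read example:
 *
 *	req.extSeqNum = 1;	mailbox + buffer 0 (held, not yet issued)
 *	req.extSeqNum = 2..N;	one write buffer per job
 *
 * Only when buffer N (ext_buf_cnt) arrives is the mailbox actually
 * issued to the port; with ext_buf_cnt == 1 the routine above issues it
 * immediately instead of waiting.
 */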
/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
 **/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on new multi-buffer pass-through mailbox command */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}
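/*
 * The routing above reduces to a small dispatch table keyed on the
 * embedded bit, subsystem, and opcode of the SLI_CONFIG header; shown
 * here as a summary only, with the action taken on a match:
 *
 *	emb	subsystem	opcode				action
 *	 0	FCOE		READ_FCF			read (MSE)
 *	 0	FCOE		ADD_FCF				write (MSE)
 *	 0	COMN		GET_CNTL[_ADDL]_ATTRIBUTES	read (MSE)
 *	 1	COMN		READ_OBJECT[_LIST]		read (HBD)
 *	 1	COMN		WRITE_OBJECT			write (HBD)
 *
 * Unknown FCOE/COMN opcodes on the non-embedded side are rejected with
 * -EPERM, while unknown embedded combinations fall through as
 * SLI_CONFIG_NOT_HANDLED so the ordinary mailbox path can still try them.
 */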
/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine is for requesting to abort a pass-through mailbox command with
 * multiple external buffers due to error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	job->reply->result = 0;
	job->job_done(job);

	return SLI_CONFIG_HANDLED;
}
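/*
 * Each call above consumes exactly one buffer: index is the session
 * sequence number before the increment, the buffer comes off the head
 * of ext_dmabuf_list (FIFO, matching allocation order), and the copy
 * length comes from the matching MSE/HBD descriptor rather than from
 * the user buffer size. A worked pass, assuming a three-buffer session
 * (numBuf == 3, seqNum starts at 1 from the mailbox job):
 *
 *	call 1: index 1, seqNum 1 -> 2, copies the hbd[1] length
 *	call 2: index 2, seqNum 2 -> 3 == numBuf, session reset
 *
 * Because the list is strictly FIFO, user space must request buffers in
 * ascending extSeqNum order; the broken-pipe checks enforce this.
 */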
/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);

		/* bsg tracking structure, only needed once we issue the
		 * mailbox; earlier calls complete synchronously, so
		 * allocating here avoids leaking it on every intermediate
		 * buffer
		 */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}
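/*
 * The invariant driving the routine above: intermediate buffers only
 * accumulate state, and nothing is sent to the port until the last one
 * lands. In outline:
 *
 *	if (ctx->seqNum == ctx->numBuf)
 *		allocate dd_data, copy the held mailbox from mbx_dmabuf,
 *		issue it MBX_NOWAIT, state -> LPFC_BSG_MBOX_PORT;
 *	else
 *		complete this job with result 0 and keep waiting;
 *
 * The tracking structure is needed only on that final pass because
 * earlier jobs complete synchronously and never reach a completion
 * handler that would use it.
 */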
/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}
	return rc;
}
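/*
 * The state checks above are the heart of the session state machine. A
 * compact summary of the transitions as used in this file (states are
 * the LPFC_BSG_MBOX_* values):
 *
 *	IDLE -> HOST	new multi-buffer mailbox accepted
 *	HOST -> PORT	mailbox issued to the port (write: last buffer)
 *	PORT -> DONE	port completed the mailbox
 *	DONE -> IDLE	read: last external buffer drained
 *	PORT -> ABTS	abort requested while the port owns the mailbox;
 *			in any other state an abort resets straight to IDLE
 *
 * So a read-side buffer is only legal once the port has finished (DONE),
 * while a write-side buffer is only legal while still collecting (HOST);
 * anything else is treated as a broken pipe and aborted.
 */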
/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}
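/*
 * The three broken-pipe tests above can be read as one predicate over
 * the session context; a sketch, with "req" and "ctx" standing for the
 * incoming dfc_mbox_req and phba->mbox_ext_buf_ctx:
 *
 *	ok = req->extMboxTag == ctx->mbxTag &&	same session
 *	     req->extSeqNum <= ctx->numBuf   &&	within the buffer count
 *	     req->extSeqNum == ctx->seqNum + 1;	strictly in order
 *
 * Any violation logs message 2976 and tears the session down with
 * -EPIPE, so a user-space tool that drops or reorders a buffer must
 * restart the whole multi-buffer command from extSeqNum 1.
 */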
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
		    struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct mbox_header *header;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* sanity check to protect driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow mailbox commands to be sent when blocked or when in
	 * the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8];
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* receive length must be non-zero */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				header = (struct mbox_header *)
						&pmb->un.varWords[0];
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
					putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
					putPaddrLow(dmabuf->phys
						  + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup the mailbox completion callback */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* context fields for the completion handler */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting; the mailbox already completed inline */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}
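/*
 * All of the per-command rewriting above relies on one fixed layout for
 * the single DMA page obtained from lpfc_bsg_dma_page_alloc():
 *
 *	+---------------------+  dmabuf->phys (dmabuf->virt)
 *	| MAILBOX_t           |  command, first sizeof(MAILBOX_t) bytes
 *	+---------------------+  dmabuf->phys + sizeof(MAILBOX_t)
 *	| extension / payload |  BIU diag, event log, dump, nembed data
 *	+---------------------+  page end (BSG_MBOX_SIZE total)
 *
 * Every branch that patches a BDE or SGE therefore points the hardware
 * at dmabuf->phys + sizeof(MAILBOX_t), and every length check compares
 * against BSG_MBOX_SIZE - sizeof(MAILBOX_t).
 */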
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				job->request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_menlo_cmd function. It is called by the ring event handler
 * function without any lock held. It can be called from both worker
 * thread context and interrupt context, and also from another thread
 * which cleans up the SLI layer objects. It copies the response data
 * (or a failing status) into the BSG job, releases the iocb and DMA
 * resources, and completes the job if it is still active.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
		 */

		menlo_resp = (struct menlo_response *)
			job->reply->reply_data.vendor_reply.vendor_rsp;
		menlo_resp->xri = rsp->ulpContext;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}

	}

	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	kfree(dd_data);

	/* Complete the job if active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}

	return;
}
/**
 * lpfc_menlo_cmd - send an iocb for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR iocb for all menlo cmd requests;
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* LE bit */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
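/*
 * The buffer plumbing above hangs everything off one BPL page: bmp
 * holds the BDE list, whose first request_nseg entries describe the
 * command buffers (cmp) and whose next reply_nseg entries describe the
 * response buffers (rmp). A worked size check, assuming the 12-byte
 * struct ulp_bde64 of this driver:
 *
 *	request_nseg = 2, reply_nseg = 3
 *	bdl.bdeSize  = (2 + 3) * sizeof(struct ulp_bde64) = 60 bytes
 *
 * which fits easily inside LPFC_BPL_SIZE, the cap used when carving
 * the list for the two payloads.
 */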
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	/* if job's driver data is NULL, the command completed or is in the
	 * process of completing. In this case, return status to the request
	 * so that the timeout is retried. This avoids double completion
	 * issues and the request will be pulled off the timer queue when
	 * the command's completion handler executes. Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_lock_irq(&phba->hbalock);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */
		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_lock_irq(&phba->hbalock);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console,
	 * so always return success (zero)
	 */
	return rc;
}
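/*
 * The dd_data->set_job / job->dd_data handshake used throughout this
 * file is what makes the timeout handler above safe against a racing
 * completion. Both sides run under phba->ct_ev_lock; a sketch of the
 * two critical sections:
 *
 *	timeout:			completion:
 *	lock(ct_ev_lock);		lock(ct_ev_lock);
 *	dd_data = job->dd_data;		job = dd_data->set_job;
 *	if (dd_data)			if (job)
 *		dd_data->set_job = NULL;	job->dd_data = NULL;
 *	job->dd_data = NULL;		unlock(ct_ev_lock);
 *	unlock(ct_ev_lock);
 *
 * Whichever side takes the lock first severs the link, so the loser
 * sees a NULL pointer and backs off: the completion never calls
 * job_done() on a timed-out job, and the timeout never aborts a
 * command that has already completed.
 */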