/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
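
/*
 * lpfc_bsg_copy_data() below moves payload bytes between the bsg
 * request/reply scatter-gather list and a chain of lpfc_dmabuf buffers
 * built by lpfc_alloc_bsg_buffers().  Each chained buffer holds at most
 * LPFC_BPL_SIZE bytes, so one sg entry may span several dma buffers and
 * vice versa.  The sg_miter walk uses SG_MITER_ATOMIC, which is why the
 * copy loop runs with local interrupts disabled.
 */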

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
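
/*
 * The completion handlers that follow share a handshake with the bsg
 * timeout handler: job->dd_data is cleared under phba->ct_ev_lock so a
 * timeout cannot abort a job that is already completing, and the
 * LPFC_IO_CMD_OUTSTANDING flag is cleared under phba->hbalock to close
 * the abort window that was opened when the iocb was issued.
 */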

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
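
/*
 * Buffer layout used by lpfc_bsg_send_mgmt_cmd() below:
 *
 *   bmp - a single mbuf holding the buffer pointer list (BPL), an array
 *         of ulp_bde64 entries that describes both payload chains
 *   cmp - dma buffer chain for the outbound CT request, covered by the
 *         first request_nseg BPL entries
 *   rmp - dma buffer chain for the inbound CT response, covered by the
 *         remaining reply_nseg BPL entries
 */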

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
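
/*
 * Unlike the CT path above, the ELS pass-through below reuses the
 * command/response dma buffers set up by lpfc_prep_els_iocb() instead
 * of allocating its own, so a timed-out (and freed) bsg request cannot
 * leave the adapter doing DMA into stale memory.
 */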

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the dma buffers allocated by lpfc_prep_els_iocb for the
	 * command and the response, to ensure that if the job times out and
	 * the request is freed, we won't be doing DMA into memory that is no
	 * longer allocated for the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
			 (phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
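
/*
 * Event registration/delivery model: an application registers a waiter
 * with SET_EVENT, which queues an lpfc_bsg_event on phba->ct_ev_waiters.
 * Unsolicited CT frames are copied into event_data objects and moved
 * from events_to_see to events_to_get, from which GET_EVENT later
 * retrieves them.
 */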
It 911 * forwards the event to any processes registered to receive CT events. 912 **/ 913 int 914 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 915 struct lpfc_iocbq *piocbq) 916 { 917 uint32_t evt_req_id = 0; 918 uint32_t cmd; 919 struct lpfc_dmabuf *dmabuf = NULL; 920 struct lpfc_bsg_event *evt; 921 struct event_data *evt_dat = NULL; 922 struct lpfc_iocbq *iocbq; 923 size_t offset = 0; 924 struct list_head head; 925 struct ulp_bde64 *bde; 926 dma_addr_t dma_addr; 927 int i; 928 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; 929 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 930 struct lpfc_hbq_entry *hbqe; 931 struct lpfc_sli_ct_request *ct_req; 932 struct bsg_job *job = NULL; 933 struct fc_bsg_reply *bsg_reply; 934 struct bsg_job_data *dd_data = NULL; 935 unsigned long flags; 936 int size = 0; 937 938 INIT_LIST_HEAD(&head); 939 list_add_tail(&head, &piocbq->list); 940 941 if (piocbq->iocb.ulpBdeCount == 0 || 942 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) 943 goto error_ct_unsol_exit; 944 945 if (phba->link_state == LPFC_HBA_ERROR || 946 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) 947 goto error_ct_unsol_exit; 948 949 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 950 dmabuf = bdeBuf1; 951 else { 952 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, 953 piocbq->iocb.un.cont64[0].addrLow); 954 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); 955 } 956 if (dmabuf == NULL) 957 goto error_ct_unsol_exit; 958 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; 959 evt_req_id = ct_req->FsType; 960 cmd = ct_req->CommandResponse.bits.CmdRsp; 961 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 962 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); 963 964 spin_lock_irqsave(&phba->ct_ev_lock, flags); 965 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 966 if (!(evt->type_mask & FC_REG_CT_EVENT) || 967 evt->req_id != evt_req_id) 968 continue; 969 970 lpfc_bsg_event_ref(evt); 971 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 972 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 973 if (evt_dat == NULL) { 974 spin_lock_irqsave(&phba->ct_ev_lock, flags); 975 lpfc_bsg_event_unref(evt); 976 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 977 "2614 Memory allocation failed for " 978 "CT event\n"); 979 break; 980 } 981 982 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 983 /* take accumulated byte count from the last iocbq */ 984 iocbq = list_entry(head.prev, typeof(*iocbq), list); 985 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; 986 } else { 987 list_for_each_entry(iocbq, &head, list) { 988 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) 989 evt_dat->len += 990 iocbq->iocb.un.cont64[i].tus.f.bdeSize; 991 } 992 } 993 994 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 995 if (evt_dat->data == NULL) { 996 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 997 "2615 Memory allocation failed for " 998 "CT event data, size %d\n", 999 evt_dat->len); 1000 kfree(evt_dat); 1001 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1002 lpfc_bsg_event_unref(evt); 1003 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1004 goto error_ct_unsol_exit; 1005 } 1006 1007 list_for_each_entry(iocbq, &head, list) { 1008 size = 0; 1009 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 1010 bdeBuf1 = iocbq->context2; 1011 bdeBuf2 = iocbq->context3; 1012 } 1013 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { 1014 if (phba->sli3_options & 1015 LPFC_SLI3_HBQ_ENABLED) { 1016 if (i == 0) { 1017 hbqe = (struct lpfc_hbq_entry *) 1018 &iocbq->iocb.un.ulpWord[0]; 1019 size = 
hbqe->bde.tus.f.bdeSize; 1020 dmabuf = bdeBuf1; 1021 } else if (i == 1) { 1022 hbqe = (struct lpfc_hbq_entry *) 1023 &iocbq->iocb.unsli3. 1024 sli3Words[4]; 1025 size = hbqe->bde.tus.f.bdeSize; 1026 dmabuf = bdeBuf2; 1027 } 1028 if ((offset + size) > evt_dat->len) 1029 size = evt_dat->len - offset; 1030 } else { 1031 size = iocbq->iocb.un.cont64[i]. 1032 tus.f.bdeSize; 1033 bde = &iocbq->iocb.un.cont64[i]; 1034 dma_addr = getPaddr(bde->addrHigh, 1035 bde->addrLow); 1036 dmabuf = lpfc_sli_ringpostbuf_get(phba, 1037 pring, dma_addr); 1038 } 1039 if (!dmabuf) { 1040 lpfc_printf_log(phba, KERN_ERR, 1041 LOG_LIBDFC, "2616 No dmabuf " 1042 "found for iocbq 0x%p\n", 1043 iocbq); 1044 kfree(evt_dat->data); 1045 kfree(evt_dat); 1046 spin_lock_irqsave(&phba->ct_ev_lock, 1047 flags); 1048 lpfc_bsg_event_unref(evt); 1049 spin_unlock_irqrestore( 1050 &phba->ct_ev_lock, flags); 1051 goto error_ct_unsol_exit; 1052 } 1053 memcpy((char *)(evt_dat->data) + offset, 1054 dmabuf->virt, size); 1055 offset += size; 1056 if (evt_req_id != SLI_CT_ELX_LOOPBACK && 1057 !(phba->sli3_options & 1058 LPFC_SLI3_HBQ_ENABLED)) { 1059 lpfc_sli_ringpostbuf_put(phba, pring, 1060 dmabuf); 1061 } else { 1062 switch (cmd) { 1063 case ELX_LOOPBACK_DATA: 1064 if (phba->sli_rev < 1065 LPFC_SLI_REV4) 1066 diag_cmd_data_free(phba, 1067 (struct lpfc_dmabufext 1068 *)dmabuf); 1069 break; 1070 case ELX_LOOPBACK_XRI_SETUP: 1071 if ((phba->sli_rev == 1072 LPFC_SLI_REV2) || 1073 (phba->sli3_options & 1074 LPFC_SLI3_HBQ_ENABLED 1075 )) { 1076 lpfc_in_buf_free(phba, 1077 dmabuf); 1078 } else { 1079 lpfc_post_buffer(phba, 1080 pring, 1081 1); 1082 } 1083 break; 1084 default: 1085 if (!(phba->sli3_options & 1086 LPFC_SLI3_HBQ_ENABLED)) 1087 lpfc_post_buffer(phba, 1088 pring, 1089 1); 1090 break; 1091 } 1092 } 1093 } 1094 } 1095 1096 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1097 if (phba->sli_rev == LPFC_SLI_REV4) { 1098 evt_dat->immed_dat = phba->ctx_idx; 1099 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX; 1100 /* Provide warning for over-run of the ct_ctx array */ 1101 if (phba->ct_ctx[evt_dat->immed_dat].valid == 1102 UNSOL_VALID) 1103 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1104 "2717 CT context array entry " 1105 "[%d] over-run: oxid:x%x, " 1106 "sid:x%x\n", phba->ctx_idx, 1107 phba->ct_ctx[ 1108 evt_dat->immed_dat].oxid, 1109 phba->ct_ctx[ 1110 evt_dat->immed_dat].SID); 1111 phba->ct_ctx[evt_dat->immed_dat].rxid = 1112 piocbq->iocb.ulpContext; 1113 phba->ct_ctx[evt_dat->immed_dat].oxid = 1114 piocbq->iocb.unsli3.rcvsli3.ox_id; 1115 phba->ct_ctx[evt_dat->immed_dat].SID = 1116 piocbq->iocb.un.rcvels.remoteID; 1117 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; 1118 } else 1119 evt_dat->immed_dat = piocbq->iocb.ulpContext; 1120 1121 evt_dat->type = FC_REG_CT_EVENT; 1122 list_add(&evt_dat->node, &evt->events_to_see); 1123 if (evt_req_id == SLI_CT_ELX_LOOPBACK) { 1124 wake_up_interruptible(&evt->wq); 1125 lpfc_bsg_event_unref(evt); 1126 break; 1127 } 1128 1129 list_move(evt->events_to_see.prev, &evt->events_to_get); 1130 1131 dd_data = (struct bsg_job_data *)evt->dd_data; 1132 job = dd_data->set_job; 1133 dd_data->set_job = NULL; 1134 lpfc_bsg_event_unref(evt); 1135 if (job) { 1136 bsg_reply = job->reply; 1137 bsg_reply->reply_payload_rcv_len = size; 1138 /* make error code available to userspace */ 1139 bsg_reply->result = 0; 1140 job->dd_data = NULL; 1141 /* complete the job back to userspace */ 1142 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1143 bsg_job_done(job, bsg_reply->result, 1144 
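
/*
 * On SLI4 ports the exchange identifiers (oxid/rxid) and source ID of
 * an unsolicited CT command are stashed in phba->ct_ctx[], indexed by
 * ctx_idx modulo LPFC_CT_CTX_MAX, so that a later SEND_MGMT_RESP can
 * transmit its response on the same exchange (see lpfc_issue_ct_rsp()).
 */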

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles the abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command toward the management plane is
 * present, it clears that context and returns 1 for handled; otherwise,
 * it returns 0 indicating that no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}
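
/*
 * A SET_EVENT request with a previously seen ev_reg_id reuses the
 * existing waiter (taking another kref) rather than allocating a new
 * one; only the first registration creates the lpfc_bsg_event.
 */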

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
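
/*
 * GET_EVENT pulls the oldest queued event (the tail of events_to_get)
 * and truncates its data to the caller's request payload length; the
 * application polls until -ENOENT indicates no more events.
 */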

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
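
/*
 * lpfc_issue_ct_rsp() below transmits a CT response with
 * CMD_XMIT_SEQUENCE64_CX.  On SLI4 ports @tag indexes the ct_ctx[]
 * entry stashed by lpfc_bsg_ct_unsol_event(); the entry is validated,
 * used to address the original exchange, and then marked invalid.
 */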

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to the dma buffer chain holding the response payload.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the BDE.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
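
/*
 * Entering diag/loopback mode quiesces normal I/O first:
 * lpfc_bsg_diag_mode_enter() blocks scsi requests on every vport and
 * waits for the FCP ring txcmplq to drain before the link is touched.
 */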

/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing from diag
 * loopback mode on the device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}
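
/*
 * SLI3 loopback entry is a mailbox sequence: MBX_DOWN_LINK, poll for
 * LPFC_LINK_DOWN, then MBX_INIT_LINK with FLAGS_LOCAL_LB (internal) or
 * FLAGS_TOPOLOGY_MODE_LOOP, polling for LPFC_HBA_READY.  The polls
 * sleep 10 ms per iteration, so the user-supplied timeout (scaled by
 * 100 below) is expressed in seconds.
 */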

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
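
/*
 * On SLI4 ports the diag state is not driven through INIT_LINK;
 * instead an embedded SLI4_CONFIG mailbox command (FCOE subsystem,
 * LINK_DIAG_STATE opcode) toggles the named link between diagnostic
 * and normal operation.
 */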
/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for set link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}
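
/*
 * Mailbox memory ownership rule used throughout this file: when
 * lpfc_sli_issue_mbox_wait() returns MBX_TIMEOUT, the command may still
 * complete later, so the SLI layer keeps ownership of the mboxq and frees
 * it from its completion path; the caller must not mempool_free() it.
 * Hence the "mbxstatus != MBX_TIMEOUT" guards around mempool_free() above.
 */
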
/**
 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up internal loopback diagnostic.
 */
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, vfi, and also vpi.
 */
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	int rc;

	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	rc = lpfc_issue_reg_vfi(phba->pport);
	return rc;
}
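
/*
 * Unit note for the polling loops in the diag-mode paths below: the
 * user-supplied diag_mode_set.timeout is in seconds. It is scaled by 100
 * into a count of 10 ms ticks (one msleep(10) per iteration), so
 * "i++ > timeout" fires after roughly that many seconds, and the log
 * messages convert back to milliseconds with "timeout * 10".
 */
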
/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout;
	int i, rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	if (link_flags == INTERNAL_LOOP_BACK)
		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
	else if (link_flags == EXTERNAL_LOOP_BACK)
		rc = lpfc_hba_init_link_fc_topology(phba,
						    FLAGS_TOPOLOGY_MODE_PT_PT,
						    MBX_NOWAIT);
	else {
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for checking and dispatching the bsg diag
 * command from the user to the proper driver action routine.
 */
static int
lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	int rc;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_IF_TYPE_2)
		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
	else
		rc = -ENODEV;

	return rc;
}
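
/*
 * Dispatch note: only SLI-3 ports and SLI-4 ports with interface type 2
 * implement diagnostic loopback here; other SLI-4 interface types get
 * -ENODEV rather than an attempt at an unsupported mailbox sequence.
 */
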
/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function is responsible for checking and dispatching the bsg diag
 * command from the user to the proper driver action routine.
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
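
/*
 * Unlike the sli3 path, ending sli4 diag mode is a separate vendor command
 * (LPFC_BSG_VENDOR_DIAG_MODE_END, above): it clears LS_LOOPBACK_MODE,
 * takes the link out of diag state, then issues a selective reset to drop
 * the temporary myDID and port registrations set up for the loopback run.
 */
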
/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function is to perform SLI4 diag link test request from the user
 * application.
 */
static int
lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmboxq;
	struct sli4_link_diag *link_diag_test_cmd;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct diag_status *diag_status_reply;
	int mbxstatus, rc = 0;

	shost = fc_bsg_to_shost(job);
	if (!shost) {
		rc = -ENODEV;
		goto job_error;
	}
	vport = shost_priv(shost);
	if (!vport) {
		rc = -ENODEV;
		goto job_error;
	}
	phba = vport->phba;
	if (!phba) {
		rc = -ENODEV;
		goto job_error;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = -ENODEV;
		goto job_error;
	}
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = -ENODEV;
		goto job_error;
	}

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct sli4_link_diag)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3013 Received LINK DIAG TEST request "
				"size:%d below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct sli4_link_diag)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	link_diag_test_cmd = (struct sli4_link_diag *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);

	if (rc)
		goto job_error;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		goto link_diag_test_exit;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len)
		goto link_diag_test_exit;

	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_id);
	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
	       link_diag_test_cmd->loops);
	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_version);
	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
	       link_diag_test_cmd->error_action);

	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || mbxstatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3010 Run link diag test mailbox failed with "
				"mbx_status x%x status x%x, add_status x%x\n",
				mbxstatus, shdr_status, shdr_add_status);
	}

	diag_status_reply = (struct diag_status *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3012 Received Run link diag test reply "
				"below minimum size (%d): reply_len:%d\n",
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_status)),
				job->reply_len);
		rc = -EINVAL;
		goto job_error;
	}

	diag_status_reply->mbox_status = mbxstatus;
	diag_status_reply->shdr_status = shdr_status;
	diag_status_reply->shdr_add_status = shdr_add_status;

link_diag_test_exit:
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);

	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				      (uint8_t *)&phba->pport->fc_sparam,
				      mbox, *rpi);
	else {
		*rpi = lpfc_sli4_alloc_rpi(phba);
		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EBUSY;
		}
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				      phba->pport->fc_myDID,
				      (uint8_t *)&phba->pport->fc_sparam,
				      mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	mbox->context2 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
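
/*
 * rpi lifetime differs by SLI rev in the helper above and the one below:
 * on SLI-4 the rpi is allocated up front with lpfc_sli4_alloc_rpi() and
 * must be returned with lpfc_sli4_free_rpi() on every error path, while
 * on SLI-3 the firmware assigns the rpi and hands it back in varWords[0]
 * of the completed REG_LOGIN mailbox.
 */
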
/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
				 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
							 sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
							sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
	    dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq,
					     (phba->fc_ratov * 2)
					     + LPFC_DRVR_TIMEOUT);
	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
				 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
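
/*
 * XRI handshake used above: the ELX_LOOPBACK_XRI_SETUP CT frame is sent
 * to the port's own rpi; the transmit exchange id comes back in the
 * response iocb's ulpContext, while the receive exchange id arrives via
 * the unsolicited CT event path and is read from the queued event_data's
 * immed_dat field.
 */
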
/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object
 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
 * returns the pointer to the buffer.
 **/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;
	struct pci_dev *pcidev = phba->pcidev;

	/* allocate dma buffer struct */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	INIT_LIST_HEAD(&dmabuf->list);

	/* now, allocate dma buffer */
	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
					   &(dmabuf->phys), GFP_KERNEL);

	if (!dmabuf->virt) {
		kfree(dmabuf);
		return NULL;
	}

	return dmabuf;
}

/**
 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
 *
 * This routine simply frees a dma buffer and its associated buffer
 * descriptor referred by @dmabuf.
 **/
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
	struct pci_dev *pcidev = phba->pcidev;

	if (!dmabuf)
		return;

	if (dmabuf->virt)
		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
				  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

/**
 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
 * @phba: Pointer to HBA context object.
 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
 *
 * This routine simply frees all dma buffers and their associated buffer
 * descriptors referred by @dmabuf_list.
 **/
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;

	if (list_empty(dmabuf_list))
		return;

	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
		list_del_init(&dmabuf->list);
		lpfc_bsg_dma_page_free(phba, dmabuf);
	}
	return;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to skip zero-initializing the allocated data buffers
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * The chained list of page sized buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		    struct ulp_bde64 *bpl, uint32_t size,
		    int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	if (mlist) {
		mlist->flag = i;
		return mlist;
	}
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}
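
/*
 * diag_cmd_data_alloc() reports its buffer count through the first
 * element's "flag" field (mlist->flag = i); callers such as the rx-buffer
 * posting routine below read it back as num_bde to learn how many BDEs
 * were actually populated in the caller-supplied bpl array.
 */
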
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
				     size_t len)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	pring = lpfc_phba_elsring(phba);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
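
/*
 * Buffer posting above takes one of two shapes: HBQ-enabled SLI-3 ports
 * get one tagged CMD_QUE_XRI64_CX per buffer, older ports get up to two
 * contiguous BDEs per CMD_QUE_XRI_BUF64_CX. Either way the iocb is
 * consumed by lpfc_sli_issue_iocb(), so a fresh one is allocated for each
 * trip around the loop.
 */
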
/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, the link must be up and in loopback mode prior
 * to being called.
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	IOCB_t *cmd, *rsp = NULL;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than
		 * 64k, then we allocate 64k and re-use that buffer over and
		 * over to xfer the whole block. This is because the Linux
		 * kernel has a problem allocating more than 120k of kernel
		 * space memory. Saw problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	if (phba->sli_rev < LPFC_SLI_REV4)
		rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
		       ptr + current_offset,
		       segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		cmd->ulpContext = txxri;
	} else {
		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
		cmdiocbq->context3 = txbmp;
		cmdiocbq->sli4_xritag = NO_XRI;
		cmd->unsli3.rcvsli3.ox_id = 0xffff;
	}
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) ||
	    ((phba->sli_rev < LPFC_SLI_REV4) &&
	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
				 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			bsg_reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply = (struct get_mgmt_rev_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
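
/*
 * Job/completion handshake used by the mailbox paths below: the submitter
 * publishes the waiting job through dd_data->set_job, and timeout/abort
 * handling clears job->dd_data under phba->ct_ev_lock. Each completion
 * handler re-reads set_job under that same lock, so exactly one side
 * completes the bsg job.
 */
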
/**
 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
 * handler function with no lock held. If the job backing the mailbox is
 * still active, it copies the response back and completes the job.
 **/
static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	dd_data = pmboxq->context1;

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if job has been aborted */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */

	if (job) {
		bsg_reply = job->reply;
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
				     MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2743 Command 0x%x is illegal in on-line "
					"state\n",
					mb->mbxCommand);
			return -EPERM;
		}
		/* fall through - offline-only commands are otherwise allowed */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1226 mbox: set_variable 0x%x, 0x%x\n",
				mb->un.varWords[0],
				mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
		    && (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2742 Unknown Command 0x%x\n",
				mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
 * @phba: Pointer to HBA context object.
 *
 * This routine cleans up and resets BSG handling of a multi-buffer mbox
 * command session.
 **/
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
		return;

	/* free all memory, including dma buffers */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
	/* multi-buffer write mailbox command pass-through complete */
	memset((char *)&phba->mbox_ext_buf_ctx, 0,
	       sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	return;
}
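
/*
 * Note on lpfc_bsg_mbox_ext_session_reset(): the memset wipes the whole
 * mbox_ext_buf_ctx, including the embedded ext_dmabuf_list head, so the
 * list head must be re-initialized afterwards; the list is empty by that
 * point because the dma pages were freed just before the memset.
 */
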
/**
 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This routine handles the BSG job for mailbox command completions with
 * multiple external buffers.
 **/
static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	dd_data = pmboxq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */

	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		bsg_reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}
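
/*
 * Session teardown differs by direction in the two completion handlers
 * below: read completions keep the multi-buffer session alive until the
 * last external buffer has been fetched (numBuf == 1) or an error occurs,
 * while write completions always reset the session immediately, since all
 * user data was staged before the mailbox was issued.
 */
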
/**
 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox read commands with
 * multiple external buffers.
 **/
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2939 SLI_CONFIG ext-buffer rd mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
		lpfc_bsg_mbox_ext_session_reset(phba);

	/* free base driver mailbox structure memory */
	mempool_free(pmboxq, phba->mbox_mem_pool);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox write commands with
 * multiple external buffers.
 **/
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with the mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2940 SLI_CONFIG ext-buffer wr mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	/* free all memory, including dma buffers */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_mbox_ext_session_reset(phba);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}

	return;
}
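
/*
 * In lpfc_bsg_sli_cfg_dma_desc_setup() below, descriptor 0 always points
 * just past the MAILBOX_t header inside the mailbox dma buffer itself,
 * while descriptors 1..n point at the standalone page-sized external
 * buffers; the same layout is used for both mse and hbd descriptors.
 */
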
3762 mse[index].pa_lo =
3763 putPaddrLow(ext_dmabuf->phys);
3764 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3765 "2944 SLI_CONFIG(mse)[%d], "
3766 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3767 index,
3768 sli_cfg_mbx->un.sli_config_emb0_subsys.
3769 mse[index].buf_len,
3770 sli_cfg_mbx->un.sli_config_emb0_subsys.
3771 mse[index].pa_hi,
3772 sli_cfg_mbx->un.sli_config_emb0_subsys.
3773 mse[index].pa_lo);
3774 }
3775 } else {
3776 if (index == 0) {
3777 sli_cfg_mbx->un.sli_config_emb1_subsys.
3778 hbd[index].pa_hi =
3779 putPaddrHigh(mbx_dmabuf->phys +
3780 sizeof(MAILBOX_t));
3781 sli_cfg_mbx->un.sli_config_emb1_subsys.
3782 hbd[index].pa_lo =
3783 putPaddrLow(mbx_dmabuf->phys +
3784 sizeof(MAILBOX_t));
3785 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3786 "3007 SLI_CONFIG(hbd)[%d], "
3787 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3788 index,
3789 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3790 &sli_cfg_mbx->un.
3791 sli_config_emb1_subsys.hbd[index]),
3792 sli_cfg_mbx->un.sli_config_emb1_subsys.
3793 hbd[index].pa_hi,
3794 sli_cfg_mbx->un.sli_config_emb1_subsys.
3795 hbd[index].pa_lo);
3796 
3797 } else {
3798 sli_cfg_mbx->un.sli_config_emb1_subsys.
3799 hbd[index].pa_hi =
3800 putPaddrHigh(ext_dmabuf->phys);
3801 sli_cfg_mbx->un.sli_config_emb1_subsys.
3802 hbd[index].pa_lo =
3803 putPaddrLow(ext_dmabuf->phys);
3804 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3805 "3008 SLI_CONFIG(hbd)[%d], "
3806 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3807 index,
3808 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3809 &sli_cfg_mbx->un.
3810 sli_config_emb1_subsys.hbd[index]),
3811 sli_cfg_mbx->un.sli_config_emb1_subsys.
3812 hbd[index].pa_hi,
3813 sli_cfg_mbx->un.sli_config_emb1_subsys.
3814 hbd[index].pa_lo);
3815 }
3816 }
3817 return;
3818 }
3819 
3820 /**
3821 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3822 * @phba: Pointer to HBA context object.
3823 * @job: Pointer to the BSG job object.
3824 * @nemb_tp: Enumerated type of non-embedded mailbox command.
3825 * @dmabuf: Pointer to a DMA buffer descriptor.
3826 *
3827 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3828 * non-embedded external buffers.
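 *
 * For orientation, buffer 0's descriptor (set up below through
 * lpfc_bsg_sli_cfg_dma_desc_setup()) points just past the mailbox
 * header inside the same DMA page; a sketch for the MSE case (the HBD
 * case is analogous):
 *
 *    mse[0].pa_hi = putPaddrHigh(mbx_dmabuf->phys + sizeof(MAILBOX_t));
 *    mse[0].pa_lo = putPaddrLow(mbx_dmabuf->phys + sizeof(MAILBOX_t));
 *
 * Additional buffers each get their own page from
 * lpfc_bsg_dma_page_alloc().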
3829 **/
3830 static int
3831 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3832 enum nemb_type nemb_tp,
3833 struct lpfc_dmabuf *dmabuf)
3834 {
3835 struct fc_bsg_request *bsg_request = job->request;
3836 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3837 struct dfc_mbox_req *mbox_req;
3838 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3839 uint32_t ext_buf_cnt, ext_buf_index;
3840 struct lpfc_dmabuf *ext_dmabuf = NULL;
3841 struct bsg_job_data *dd_data = NULL;
3842 LPFC_MBOXQ_t *pmboxq = NULL;
3843 MAILBOX_t *pmb;
3844 uint8_t *pmbx;
3845 int rc, i;
3846 
3847 mbox_req =
3848 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3849 
3850 /* pointer to the start of mailbox command */
3851 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3852 
3853 if (nemb_tp == nemb_mse) {
3854 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3855 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3856 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3857 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3858 "2945 Handled SLI_CONFIG(mse) rd, "
3859 "ext_buf_cnt(%d) out of range(%d)\n",
3860 ext_buf_cnt,
3861 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3862 rc = -ERANGE;
3863 goto job_error;
3864 }
3865 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3866 "2941 Handled SLI_CONFIG(mse) rd, "
3867 "ext_buf_cnt:%d\n", ext_buf_cnt);
3868 } else {
3869 /* sanity check on interface type for support */
3870 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3871 LPFC_SLI_INTF_IF_TYPE_2) {
3872 rc = -ENODEV;
3873 goto job_error;
3874 }
3875 /* nemb_tp == nemb_hbd */
3876 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3877 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3878 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3879 "2946 Handled SLI_CONFIG(hbd) rd, "
3880 "ext_buf_cnt(%d) out of range(%d)\n",
3881 ext_buf_cnt,
3882 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3883 rc = -ERANGE;
3884 goto job_error;
3885 }
3886 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3887 "2942 Handled SLI_CONFIG(hbd) rd, "
3888 "ext_buf_cnt:%d\n", ext_buf_cnt);
3889 }
3890 
3891 /* before dma descriptor setup */
3892 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3893 sta_pre_addr, dmabuf, ext_buf_cnt);
3894 
3895 /* reject a non-embedded mailbox command with no external buffer */
3896 if (ext_buf_cnt == 0) {
3897 rc = -EPERM;
3898 goto job_error;
3899 } else if (ext_buf_cnt > 1) {
3900 /* additional external read buffers */
3901 for (i = 1; i < ext_buf_cnt; i++) {
3902 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3903 if (!ext_dmabuf) {
3904 rc = -ENOMEM;
3905 goto job_error;
3906 }
3907 list_add_tail(&ext_dmabuf->list,
3908 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3909 }
3910 }
3911 
3912 /* bsg tracking structure */
3913 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3914 if (!dd_data) {
3915 rc = -ENOMEM;
3916 goto job_error;
3917 }
3918 
3919 /* mailbox command structure for base driver */
3920 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3921 if (!pmboxq) {
3922 rc = -ENOMEM;
3923 goto job_error;
3924 }
3925 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3926 
3927 /* for the first external buffer */
3928 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3929 
3930 /* for the rest of external buffer descriptors if any */
3931 if (ext_buf_cnt > 1) {
3932 ext_buf_index = 1;
3933 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3934 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3935 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3936 ext_buf_index, dmabuf,
3937 curr_dmabuf);
3938 ext_buf_index++;
3939 }
3940 }
3941 
3942 /* after dma descriptor setup */
3943 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3944 sta_pos_addr, dmabuf, ext_buf_cnt);
3945 
3946 /* construct base driver mbox command */
3947 pmb = &pmboxq->u.mb;
3948 pmbx = (uint8_t *)dmabuf->virt;
3949 memcpy(pmb, pmbx, sizeof(*pmb));
3950 pmb->mbxOwner = OWN_HOST;
3951 pmboxq->vport = phba->pport;
3952 
3953 /* multi-buffer handling context */
3954 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3955 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3956 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3957 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3958 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3959 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3960 
3961 /* callback for multi-buffer read mailbox command */
3962 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3963 
3964 /* context fields to callback function */
3965 pmboxq->context1 = dd_data;
3966 dd_data->type = TYPE_MBOX;
3967 dd_data->set_job = job;
3968 dd_data->context_un.mbox.pmboxq = pmboxq;
3969 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3970 job->dd_data = dd_data;
3971 
3972 /* state change */
3973 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3974 
3975 /*
3976 * Non-embedded mailbox subcommand data gets byte swapped here because
3977 * the lower level driver code only does the first 64 mailbox words.
3978 */
3979 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3980 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3981 (nemb_tp == nemb_mse))
3982 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3983 &pmbx[sizeof(MAILBOX_t)],
3984 sli_cfg_mbx->un.sli_config_emb0_subsys.
3985 mse[0].buf_len);
3986 
3987 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3988 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3989 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3990 "2947 Issued SLI_CONFIG ext-buffer "
3991 "mailbox command, rc:x%x\n", rc);
3992 return SLI_CONFIG_HANDLED;
3993 }
3994 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3995 "2948 Failed to issue SLI_CONFIG ext-buffer "
3996 "mailbox command, rc:x%x\n", rc);
3997 rc = -EPIPE;
3998 
3999 job_error:
4000 if (pmboxq)
4001 mempool_free(pmboxq, phba->mbox_mem_pool);
4002 lpfc_bsg_dma_page_list_free(phba,
4003 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4004 kfree(dd_data);
4005 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4006 return rc;
4007 }
4008 
4009 /**
4010 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4011 * @phba: Pointer to HBA context object.
4012 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerated type of non-embedded mailbox command.
4013 * @dmabuf: Pointer to a DMA buffer descriptor.
4014 *
4015 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4016 * non-embedded external buffers.
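 *
 * Rough control flow (a sketch only; the function body below is
 * authoritative): with a single external buffer the mailbox is issued
 * to the port right away, otherwise this job completes and the
 * remaining buffers arrive later through lpfc_bsg_write_ebuf_set():
 *
 *    if (ext_buf_cnt == 1)
 *        rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *    else
 *        bsg_job_done(job, 0, 0);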
4017 **/
4018 static int
4019 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4020 enum nemb_type nemb_tp,
4021 struct lpfc_dmabuf *dmabuf)
4022 {
4023 struct fc_bsg_request *bsg_request = job->request;
4024 struct fc_bsg_reply *bsg_reply = job->reply;
4025 struct dfc_mbox_req *mbox_req;
4026 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4027 uint32_t ext_buf_cnt;
4028 struct bsg_job_data *dd_data = NULL;
4029 LPFC_MBOXQ_t *pmboxq = NULL;
4030 MAILBOX_t *pmb;
4031 uint8_t *mbx;
4032 int rc = SLI_CONFIG_NOT_HANDLED, i;
4033 
4034 mbox_req =
4035 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4036 
4037 /* pointer to the start of mailbox command */
4038 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4039 
4040 if (nemb_tp == nemb_mse) {
4041 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4042 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4043 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4044 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4045 "2953 Failed SLI_CONFIG(mse) wr, "
4046 "ext_buf_cnt(%d) out of range(%d)\n",
4047 ext_buf_cnt,
4048 LPFC_MBX_SLI_CONFIG_MAX_MSE);
4049 return -ERANGE;
4050 }
4051 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4052 "2949 Handled SLI_CONFIG(mse) wr, "
4053 "ext_buf_cnt:%d\n", ext_buf_cnt);
4054 } else {
4055 /* sanity check on interface type for support */
4056 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4057 LPFC_SLI_INTF_IF_TYPE_2)
4058 return -ENODEV;
4059 /* nemb_tp == nemb_hbd */
4060 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4061 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4062 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4063 "2954 Failed SLI_CONFIG(hbd) wr, "
4064 "ext_buf_cnt(%d) out of range(%d)\n",
4065 ext_buf_cnt,
4066 LPFC_MBX_SLI_CONFIG_MAX_HBD);
4067 return -ERANGE;
4068 }
4069 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4070 "2950 Handled SLI_CONFIG(hbd) wr, "
4071 "ext_buf_cnt:%d\n", ext_buf_cnt);
4072 }
4073 
4074 /* before dma buffer descriptor setup */
4075 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4076 sta_pre_addr, dmabuf, ext_buf_cnt);
4077 
4078 if (ext_buf_cnt == 0)
4079 return -EPERM;
4080 
4081 /* for the first external buffer */
4082 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4083 
4084 /* after dma descriptor setup */
4085 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4086 sta_pos_addr, dmabuf, ext_buf_cnt);
4087 
4088 /* log the lengths of the external buffers yet to be received */
4089 for (i = 1; i < ext_buf_cnt; i++) {
4090 if (nemb_tp == nemb_mse)
4091 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4092 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4093 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4094 mse[i].buf_len);
4095 else
4096 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4097 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4098 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4099 &sli_cfg_mbx->un.sli_config_emb1_subsys.
4100 hbd[i]));
4101 }
4102 
4103 /* multi-buffer handling context */
4104 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4105 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4106 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4107 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4108 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4109 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4110 
4111 if (ext_buf_cnt == 1) {
4112 /* bsg tracking structure */
4113 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4114 if (!dd_data) {
4115 rc = -ENOMEM;
4116 goto job_error;
4117 }
4118 
4119 /* mailbox command structure for base driver */
4120 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4121 if (!pmboxq) {
4122 rc = -ENOMEM;
4123 goto job_error;
4124 }
4125 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4126 pmb = &pmboxq->u.mb;
4127 mbx = (uint8_t *)dmabuf->virt;
4128 memcpy(pmb, mbx, sizeof(*pmb));
4129 pmb->mbxOwner = OWN_HOST;
4130 pmboxq->vport = phba->pport;
4131 
4132 /* callback for multi-buffer write mailbox command */
4133 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4134 
4135 /* context fields to callback function */
4136 pmboxq->context1 = dd_data;
4137 dd_data->type = TYPE_MBOX;
4138 dd_data->set_job = job;
4139 dd_data->context_un.mbox.pmboxq = pmboxq;
4140 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4141 job->dd_data = dd_data;
4142 
4143 /* state change */
4144 
4145 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4146 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4147 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4148 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4149 "2955 Issued SLI_CONFIG ext-buffer "
4150 "mailbox command, rc:x%x\n", rc);
4151 return SLI_CONFIG_HANDLED;
4152 }
4153 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4154 "2956 Failed to issue SLI_CONFIG ext-buffer "
4155 "mailbox command, rc:x%x\n", rc);
4156 rc = -EPIPE;
4157 goto job_error;
4158 }
4159 
4160 /* wait for additional external buffers */
4161 
4162 bsg_reply->result = 0;
4163 bsg_job_done(job, bsg_reply->result,
4164 bsg_reply->reply_payload_rcv_len);
4165 return SLI_CONFIG_HANDLED;
4166 
4167 job_error:
4168 if (pmboxq)
4169 mempool_free(pmboxq, phba->mbox_mem_pool);
4170 kfree(dd_data);
4171 
4172 return rc;
4173 }
4174 
4175 /**
4176 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4177 * @phba: Pointer to HBA context object.
4178 * @job: Pointer to the BSG job object.
4179 * @dmabuf: Pointer to a DMA buffer descriptor.
4180 *
4181 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4182 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4183 * with embedded subsystem 0x1 and opcodes with external HBDs.
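 *
 * Dispatch sketch (emb0/MSE case; the emb1/HBD branch below decodes
 * the corresponding emb1 fields instead):
 *
 *    subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
 *            &sli_cfg_mbx->un.sli_config_emb0_subsys);
 *    opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
 *            &sli_cfg_mbx->un.sli_config_emb0_subsys);
 *
 * Recognized opcodes route to the read or write handler; the rest are
 * either rejected (-EPERM) or returned as SLI_CONFIG_NOT_HANDLED so
 * ordinary mailbox processing can take over.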
4184 **/
4185 static int
4186 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4187 struct lpfc_dmabuf *dmabuf)
4188 {
4189 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4190 uint32_t subsys;
4191 uint32_t opcode;
4192 int rc = SLI_CONFIG_NOT_HANDLED;
4193 
4194 /* state change on new multi-buffer pass-through mailbox command */
4195 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4196 
4197 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4198 
4199 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4200 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4201 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4202 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4203 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4204 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4205 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4206 switch (opcode) {
4207 case FCOE_OPCODE_READ_FCF:
4208 case FCOE_OPCODE_GET_DPORT_RESULTS:
4209 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4210 "2957 Handled SLI_CONFIG "
4211 "subsys_fcoe, opcode:x%x\n",
4212 opcode);
4213 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4214 nemb_mse, dmabuf);
4215 break;
4216 case FCOE_OPCODE_ADD_FCF:
4217 case FCOE_OPCODE_SET_DPORT_MODE:
4218 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4219 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4220 "2958 Handled SLI_CONFIG "
4221 "subsys_fcoe, opcode:x%x\n",
4222 opcode);
4223 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4224 nemb_mse, dmabuf);
4225 break;
4226 default:
4227 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4228 "2959 Reject SLI_CONFIG "
4229 "subsys_fcoe, opcode:x%x\n",
4230 opcode);
4231 rc = -EPERM;
4232 break;
4233 }
4234 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4235 switch (opcode) {
4236 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4237 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4238 case COMN_OPCODE_GET_PROFILE_CONFIG:
4239 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4240 "3106 Handled SLI_CONFIG "
4241 "subsys_comn, opcode:x%x\n",
4242 opcode);
4243 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4244 nemb_mse, dmabuf);
4245 break;
4246 default:
4247 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4248 "3107 Reject SLI_CONFIG "
4249 "subsys_comn, opcode:x%x\n",
4250 opcode);
4251 rc = -EPERM;
4252 break;
4253 }
4254 } else {
4255 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4256 "2977 Reject SLI_CONFIG "
4257 "subsys:x%d, opcode:x%x\n",
4258 subsys, opcode);
4259 rc = -EPERM;
4260 }
4261 } else {
4262 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4263 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4264 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4265 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4266 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4267 switch (opcode) {
4268 case COMN_OPCODE_READ_OBJECT:
4269 case COMN_OPCODE_READ_OBJECT_LIST:
4270 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4271 "2960 Handled SLI_CONFIG "
4272 "subsys_comn, opcode:x%x\n",
4273 opcode);
4274 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4275 nemb_hbd, dmabuf);
4276 break;
4277 case COMN_OPCODE_WRITE_OBJECT:
4278 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4279 "2961 Handled SLI_CONFIG "
4280 "subsys_comn, opcode:x%x\n",
4281 opcode);
4282 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4283 nemb_hbd, dmabuf);
4284 break;
4285 default:
4286 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4287 "2962 Not handled SLI_CONFIG "
4288 "subsys_comn, opcode:x%x\n",
4289 opcode);
4290 rc = SLI_CONFIG_NOT_HANDLED;
4291 break;
4292 }
4293 } else {
4294 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4295 "2978 Not handled SLI_CONFIG "
4296 "subsys:x%d, opcode:x%x\n",
4297 subsys, opcode);
4298 rc = SLI_CONFIG_NOT_HANDLED;
4299 }
4300 }
4301 
4302 /* state reset on not handled new multi-buffer mailbox command */
4303 if (rc != SLI_CONFIG_HANDLED)
4304 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4305 
4306 return rc;
4307 }
4308 
4309 /**
4310 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4311 * @phba: Pointer to HBA context object.
4312 *
4313 * This routine is for requesting to abort a pass-through mailbox command with
4314 * multiple external buffers due to an error condition.
4315 **/
4316 static void
4317 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4318 {
4319 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4320 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4321 else
4322 lpfc_bsg_mbox_ext_session_reset(phba);
4323 return;
4324 }
4325 
4326 /**
4327 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4328 * @phba: Pointer to HBA context object.
4329 * @job: Pointer to the BSG job object.
4330 *
4331 * This routine returns the next mailbox read external buffer to
4332 * user space through BSG.
4333 **/
4334 static int
4335 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4336 {
4337 struct fc_bsg_reply *bsg_reply = job->reply;
4338 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4339 struct lpfc_dmabuf *dmabuf;
4340 uint8_t *pbuf;
4341 uint32_t size;
4342 uint32_t index;
4343 
4344 index = phba->mbox_ext_buf_ctx.seqNum;
4345 phba->mbox_ext_buf_ctx.seqNum++;
4346 
4347 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4348 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4349 
4350 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4351 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4352 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4353 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4354 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4355 "buffer[%d], size:%d\n", index, size);
4356 } else {
4357 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4358 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4359 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4360 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4361 "buffer[%d], size:%d\n", index, size);
4362 }
4363 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4364 return -EPIPE;
4365 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4366 struct lpfc_dmabuf, list);
4367 list_del_init(&dmabuf->list);
4368 
4369 /* after dma buffer descriptor setup */
4370 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4371 mbox_rd, dma_ebuf, sta_pos_addr,
4372 dmabuf, index);
4373 
4374 pbuf = (uint8_t *)dmabuf->virt;
4375 bsg_reply->reply_payload_rcv_len =
4376 sg_copy_from_buffer(job->reply_payload.sg_list,
4377 job->reply_payload.sg_cnt,
4378 pbuf, size);
4379 
4380 lpfc_bsg_dma_page_free(phba, dmabuf);
4381 
4382 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4383 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4384 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4385 "command session done\n");
4386 lpfc_bsg_mbox_ext_session_reset(phba);
4387 }
4388 
4389 bsg_reply->result = 0;
4390 bsg_job_done(job, bsg_reply->result,
4391 bsg_reply->reply_payload_rcv_len);
4392 
4393 return SLI_CONFIG_HANDLED;
4394 }
4395 
4396 /**
4397 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4398 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
4399 * @dmabuf: Pointer to a DMA buffer descriptor.
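 *
 * Per-buffer bookkeeping sketch (illustrative; "ctx" abbreviates
 * phba->mbox_ext_buf_ctx, and "index" is the pre-increment sequence
 * number used below): copy the user payload into the DMA page, write
 * its descriptor into the saved mailbox, then queue the page:
 *
 *    sg_copy_to_buffer(job->request_payload.sg_list,
 *            job->request_payload.sg_cnt, pbuf, size);
 *    lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
 *            ctx.mbx_dmabuf, dmabuf);
 *    list_add_tail(&dmabuf->list, &ctx.ext_dmabuf_list);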
4400 *
4401 * This routine sets up the next mailbox write external buffer obtained
4402 * from user space through BSG.
4403 **/
4404 static int
4405 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4406 struct lpfc_dmabuf *dmabuf)
4407 {
4408 struct fc_bsg_reply *bsg_reply = job->reply;
4409 struct bsg_job_data *dd_data = NULL;
4410 LPFC_MBOXQ_t *pmboxq = NULL;
4411 MAILBOX_t *pmb;
4412 enum nemb_type nemb_tp;
4413 uint8_t *pbuf;
4414 uint32_t size;
4415 uint32_t index;
4416 int rc;
4417 
4418 index = phba->mbox_ext_buf_ctx.seqNum;
4419 phba->mbox_ext_buf_ctx.seqNum++;
4420 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4421 
4422 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4423 if (!dd_data) {
4424 rc = -ENOMEM;
4425 goto job_error;
4426 }
4427 
4428 pbuf = (uint8_t *)dmabuf->virt;
4429 size = job->request_payload.payload_len;
4430 sg_copy_to_buffer(job->request_payload.sg_list,
4431 job->request_payload.sg_cnt,
4432 pbuf, size);
4433 
4434 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4435 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4436 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4437 "buffer[%d], size:%d\n",
4438 phba->mbox_ext_buf_ctx.seqNum, size);
4439 
4440 } else {
4441 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4442 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4443 "buffer[%d], size:%d\n",
4444 phba->mbox_ext_buf_ctx.seqNum, size);
4445 
4446 }
4447 
4448 /* set up external buffer descriptor and add to external buffer list */
4449 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4450 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4451 dmabuf);
4452 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4453 
4454 /* after write dma buffer */
4455 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4456 mbox_wr, dma_ebuf, sta_pos_addr,
4457 dmabuf, index);
4458 
4459 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4460 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4461 "2968 SLI_CONFIG ext-buffer wr all %d "
4462 "ebuffers received\n",
4463 phba->mbox_ext_buf_ctx.numBuf);
4464 /* mailbox command structure for base driver */
4465 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4466 if (!pmboxq) {
4467 rc = -ENOMEM;
4468 goto job_error;
4469 }
4470 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4471 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4472 pmb = &pmboxq->u.mb;
4473 memcpy(pmb, pbuf, sizeof(*pmb));
4474 pmb->mbxOwner = OWN_HOST;
4475 pmboxq->vport = phba->pport;
4476 
4477 /* callback for multi-buffer write mailbox command */
4478 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4479 
4480 /* context fields to callback function */
4481 pmboxq->context1 = dd_data;
4482 dd_data->type = TYPE_MBOX;
4483 dd_data->set_job = job;
4484 dd_data->context_un.mbox.pmboxq = pmboxq;
4485 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4486 job->dd_data = dd_data;
4487 
4488 /* state change */
4489 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4490 
4491 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4492 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4493 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4494 "2969 Issued SLI_CONFIG ext-buffer "
4495 "mailbox command, rc:x%x\n", rc);
4496 return SLI_CONFIG_HANDLED;
4497 }
4498 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4499 "2970 Failed to issue SLI_CONFIG ext-buffer "
4500 "mailbox command, rc:x%x\n", rc);
4501 rc = -EPIPE;
4502 goto job_error;
4503 }
4504 
4505 /* wait for additional external buffers */
4506 bsg_reply->result = 0;
4507 bsg_job_done(job, bsg_reply->result,
4508 bsg_reply->reply_payload_rcv_len);
4509 return SLI_CONFIG_HANDLED;
4510 
4511 job_error:
4512 lpfc_bsg_dma_page_free(phba, dmabuf);
4513 kfree(dd_data);
4514 
4515 return rc;
4516 }
4517 
4518 /**
4519 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4520 * @phba: Pointer to HBA context object.
4521 * @job: Pointer to the BSG job object.
4522 * @dmabuf: Pointer to a DMA buffer descriptor.
4523 *
4524 * This routine handles an external buffer for a SLI_CONFIG (0x9B) mailbox
4525 * command with multiple non-embedded external buffers.
4526 **/
4527 static int
4528 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4529 struct lpfc_dmabuf *dmabuf)
4530 {
4531 int rc;
4532 
4533 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4534 "2971 SLI_CONFIG buffer (type:x%x)\n",
4535 phba->mbox_ext_buf_ctx.mboxType);
4536 
4537 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4538 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4539 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4540 "2972 SLI_CONFIG rd buffer state "
4541 "mismatch:x%x\n",
4542 phba->mbox_ext_buf_ctx.state);
4543 lpfc_bsg_mbox_ext_abort(phba);
4544 return -EPIPE;
4545 }
4546 rc = lpfc_bsg_read_ebuf_get(phba, job);
4547 if (rc == SLI_CONFIG_HANDLED)
4548 lpfc_bsg_dma_page_free(phba, dmabuf);
4549 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4550 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4551 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4552 "2973 SLI_CONFIG wr buffer state "
4553 "mismatch:x%x\n",
4554 phba->mbox_ext_buf_ctx.state);
4555 lpfc_bsg_mbox_ext_abort(phba);
4556 return -EPIPE;
4557 }
4558 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4559 }
4560 return rc;
4561 }
4562 
4563 /**
4564 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4565 * @phba: Pointer to HBA context object.
4566 * @job: Pointer to the BSG job object.
4567 * @dmabuf: Pointer to a DMA buffer descriptor.
4568 *
4569 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4570 * (0x9B) mailbox commands and external buffers.
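 *
 * The broken-pipe checks below compare the incoming dfc_mbox_req
 * against the saved session context ("ctx" abbreviates
 * phba->mbox_ext_buf_ctx); all three must hold:
 *
 *    mbox_req->extMboxTag == ctx.mbxTag      (same session tag)
 *    mbox_req->extSeqNum  <= ctx.numBuf      (sequence in range)
 *    mbox_req->extSeqNum  == ctx.seqNum + 1  (strictly in order)
 *
 * Any violation resets the session and returns -EPIPE.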
4571 **/
4572 static int
4573 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4574 struct lpfc_dmabuf *dmabuf)
4575 {
4576 struct fc_bsg_request *bsg_request = job->request;
4577 struct dfc_mbox_req *mbox_req;
4578 int rc = SLI_CONFIG_NOT_HANDLED;
4579 
4580 mbox_req =
4581 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4582 
4583 /* mbox command with/without single external buffer */
4584 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4585 return rc;
4586 
4587 /* mbox command and first external buffer */
4588 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4589 if (mbox_req->extSeqNum == 1) {
4590 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4591 "2974 SLI_CONFIG mailbox: tag:%d, "
4592 "seq:%d\n", mbox_req->extMboxTag,
4593 mbox_req->extSeqNum);
4594 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4595 return rc;
4596 } else
4597 goto sli_cfg_ext_error;
4598 }
4599 
4600 /*
4601 * handle additional external buffers
4602 */
4603 
4604 /* check broken pipe conditions */
4605 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4606 goto sli_cfg_ext_error;
4607 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4608 goto sli_cfg_ext_error;
4609 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4610 goto sli_cfg_ext_error;
4611 
4612 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4613 "2975 SLI_CONFIG mailbox external buffer: "
4614 "extSta:x%x, tag:%d, seq:%d\n",
4615 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4616 mbox_req->extSeqNum);
4617 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4618 return rc;
4619 
4620 sli_cfg_ext_error:
4621 /* all other cases, broken pipe */
4622 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4623 "2976 SLI_CONFIG mailbox broken pipe: "
4624 "ctxSta:x%x, ctxNumBuf:%d "
4625 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4626 phba->mbox_ext_buf_ctx.state,
4627 phba->mbox_ext_buf_ctx.numBuf,
4628 phba->mbox_ext_buf_ctx.mbxTag,
4629 phba->mbox_ext_buf_ctx.seqNum,
4630 mbox_req->extMboxTag, mbox_req->extSeqNum);
4631 
4632 lpfc_bsg_mbox_ext_session_reset(phba);
4633 
4634 return -EPIPE;
4635 }
4636 
4637 /**
4638 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4639 * @phba: Pointer to HBA context object.
4640 * @job: Pointer to the BSG job object.
4641 * @vport: Pointer to a vport object.
4642 *
4643 * Allocate a tracking object and mailbox command memory, get a mailbox
4644 * from the mailbox pool, and copy in the caller's mailbox command.
4645 *
4646 * If offline and the sli is active we need to poll for the command (port is
4647 * being reset) and complete the job, otherwise issue the mailbox command and
4648 * let our completion handler finish the command.
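 *
 * Issue-path sketch (mirrors the tail of the function below):
 *
 *    if ((vport->fc_flag & FC_OFFLINE_MODE) ||
 *        !(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
 *        rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 *    else
 *        rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *
 * In the polled case the reply is copied back inline before the job
 * completes; in the no-wait case lpfc_bsg_issue_mbox_cmpl() finishes
 * the job later.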
4649 **/
4650 static int
4651 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4652 struct lpfc_vport *vport)
4653 {
4654 struct fc_bsg_request *bsg_request = job->request;
4655 struct fc_bsg_reply *bsg_reply = job->reply;
4656 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4657 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4658 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4659 uint8_t *pmbx = NULL;
4660 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4661 struct lpfc_dmabuf *dmabuf = NULL;
4662 struct dfc_mbox_req *mbox_req;
4663 struct READ_EVENT_LOG_VAR *rdEventLog;
4664 uint32_t transmit_length, receive_length, mode;
4665 struct lpfc_mbx_sli4_config *sli4_config;
4666 struct lpfc_mbx_nembed_cmd *nembed_sge;
4667 struct ulp_bde64 *bde;
4668 uint8_t *ext = NULL;
4669 int rc = 0;
4670 uint8_t *from;
4671 uint32_t size;
4672 
4673 /* in case no data is transferred */
4674 bsg_reply->reply_payload_rcv_len = 0;
4675 
4676 /* sanity check to protect driver */
4677 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4678 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4679 rc = -ERANGE;
4680 goto job_done;
4681 }
4682 
4683 /*
4684 * Don't allow mailbox commands to be sent when blocked or when in
4685 * the middle of discovery
4686 */
4687 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4688 rc = -EAGAIN;
4689 goto job_done;
4690 }
4691 
4692 mbox_req =
4693 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4694 
4695 /* check if requested extended data lengths are valid */
4696 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4697 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4698 rc = -ERANGE;
4699 goto job_done;
4700 }
4701 
4702 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4703 if (!dmabuf || !dmabuf->virt) {
4704 rc = -ENOMEM;
4705 goto job_done;
4706 }
4707 
4708 /* Get the mailbox command or external buffer from BSG */
4709 pmbx = (uint8_t *)dmabuf->virt;
4710 size = job->request_payload.payload_len;
4711 sg_copy_to_buffer(job->request_payload.sg_list,
4712 job->request_payload.sg_cnt, pmbx, size);
4713 
4714 /* Handle possible SLI_CONFIG with non-embedded payloads */
4715 if (phba->sli_rev == LPFC_SLI_REV4) {
4716 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4717 if (rc == SLI_CONFIG_HANDLED)
4718 goto job_cont;
4719 if (rc)
4720 goto job_done;
4721 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4722 }
4723 
4724 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4725 if (rc != 0)
4726 goto job_done; /* must be negative */
4727 
4728 /* allocate our bsg tracking structure */
4729 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4730 if (!dd_data) {
4731 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4732 "2727 Failed allocation of dd_data\n");
4733 rc = -ENOMEM;
4734 goto job_done;
4735 }
4736 
4737 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4738 if (!pmboxq) {
4739 rc = -ENOMEM;
4740 goto job_done;
4741 }
4742 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4743 
4744 pmb = &pmboxq->u.mb;
4745 memcpy(pmb, pmbx, sizeof(*pmb));
4746 pmb->mbxOwner = OWN_HOST;
4747 pmboxq->vport = vport;
4748 
4749 /* If HBA encountered an error attention, allow only DUMP
4750 * or RESTART mailbox commands until the HBA is restarted.
4751 */
4752 if (phba->pport->stopped &&
4753 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4754 pmb->mbxCommand != MBX_RESTART &&
4755 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4756 pmb->mbxCommand != MBX_WRITE_WWN)
4757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4758 "2797 mbox: Issued mailbox cmd "
4759 "0x%x while in stopped state.\n",
4760 pmb->mbxCommand);
4761 
4762 /* extended mailbox commands will need an extended buffer */
4763 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4764 from = pmbx;
4765 ext = from + sizeof(MAILBOX_t);
4766 pmboxq->context2 = ext;
4767 pmboxq->in_ext_byte_len =
4768 mbox_req->inExtWLen * sizeof(uint32_t);
4769 pmboxq->out_ext_byte_len =
4770 mbox_req->outExtWLen * sizeof(uint32_t);
4771 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4772 }
4773 
4774 /* biu diag will need a kernel buffer to transfer the data;
4775 * allocate our own buffer and set up the mailbox command to
4776 * use ours
4777 */
4778 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4779 transmit_length = pmb->un.varWords[1];
4780 receive_length = pmb->un.varWords[4];
4781 /* transmit length cannot be greater than receive length or
4782 * mailbox extension size
4783 */
4784 if ((transmit_length > receive_length) ||
4785 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4786 rc = -ERANGE;
4787 goto job_done;
4788 }
4789 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4790 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4791 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4792 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4793 
4794 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4795 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4796 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4797 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4798 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4799 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4800 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4801 rdEventLog = &pmb->un.varRdEventLog;
4802 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4803 mode = bf_get(lpfc_event_log, rdEventLog);
4804 
4805 /* receive length cannot be greater than mailbox
4806 * extension size
4807 */
4808 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4809 rc = -ERANGE;
4810 goto job_done;
4811 }
4812 
4813 /* mode zero uses a bde like biu diags command */
4814 if (mode == 0) {
4815 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4816 + sizeof(MAILBOX_t));
4817 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4818 + sizeof(MAILBOX_t));
4819 }
4820 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4821 /* Let type 4 (well known data) through because the data is
4822 * returned in varWords[4-8]
4823 * otherwise check the receive length and fetch the buffer addr
4824 */
4825 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4826 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4827 /* rebuild the command for sli4 using our own buffers
4828 * like we do for biu diags
4829 */
4830 receive_length = pmb->un.varWords[2];
4831 /* receive length must not be zero for
4832 * the rebuilt sli4 command
4833 */
4834 if (receive_length == 0) {
4835 rc = -ERANGE;
4836 goto job_done;
4837 }
4838 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4839 + sizeof(MAILBOX_t));
4840 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4841 + sizeof(MAILBOX_t));
4842 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4843 pmb->un.varUpdateCfg.co) {
4844 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4845 
4846 /* bde size cannot be greater than mailbox ext size */
4847 if (bde->tus.f.bdeSize >
4848 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4849 rc = -ERANGE;
4850 goto job_done;
4851 }
4852 bde->addrHigh = putPaddrHigh(dmabuf->phys
4853 + sizeof(MAILBOX_t));
4854 bde->addrLow = putPaddrLow(dmabuf->phys
4855 + sizeof(MAILBOX_t));
4856 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4857 /* Handling non-embedded SLI_CONFIG mailbox command */
4858 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4859 if (!bf_get(lpfc_mbox_hdr_emb,
4860 &sli4_config->header.cfg_mhdr)) {
4861 /* rebuild the command for sli4 using our
4862 * own buffers like we do for biu diags
4863 */
4864 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4865 &pmb->un.varWords[0];
4866 receive_length = nembed_sge->sge[0].length;
4867 
4868 /* receive length cannot be greater than
4869 * mailbox extension size
4870 */
4871 if ((receive_length == 0) ||
4872 (receive_length >
4873 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4874 rc = -ERANGE;
4875 goto job_done;
4876 }
4877 
4878 nembed_sge->sge[0].pa_hi =
4879 putPaddrHigh(dmabuf->phys
4880 + sizeof(MAILBOX_t));
4881 nembed_sge->sge[0].pa_lo =
4882 putPaddrLow(dmabuf->phys
4883 + sizeof(MAILBOX_t));
4884 }
4885 }
4886 }
4887 
4888 dd_data->context_un.mbox.dmabuffers = dmabuf;
4889 
4890 /* setup wake call as IOCB callback */
4891 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4892 
4893 /* setup context field to pass wait_queue pointer to wake function */
4894 pmboxq->context1 = dd_data;
4895 dd_data->type = TYPE_MBOX;
4896 dd_data->set_job = job;
4897 dd_data->context_un.mbox.pmboxq = pmboxq;
4898 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4899 dd_data->context_un.mbox.ext = ext;
4900 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4901 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4902 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4903 job->dd_data = dd_data;
4904 
4905 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4906 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4907 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4908 if (rc != MBX_SUCCESS) {
4909 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4910 goto job_done;
4911 }
4912 
4913 /* job finished, copy the data */
4914 memcpy(pmbx, pmb, sizeof(*pmb));
4915 bsg_reply->reply_payload_rcv_len =
4916 sg_copy_from_buffer(job->reply_payload.sg_list,
4917 job->reply_payload.sg_cnt,
4918 pmbx, size);
4919 /* polled mailbox completed inline; nothing to wait for */
4920 rc = 0;
4921 goto job_done;
4922 }
4923 
4924 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4925 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4926 return 1; /* job started */
4927 
4928 job_done:
4929 /* common exit for error or job completed inline */
4930 if (pmboxq)
4931 mempool_free(pmboxq, phba->mbox_mem_pool);
4932 lpfc_bsg_dma_page_free(phba, dmabuf);
4933 kfree(dd_data);
4934 
4935 job_cont:
4936 return rc;
4937 }
4938 
4939 /**
4940 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4941 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
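 *
 * Request framing sketch (as the size check below expects, a struct
 * dfc_mbox_req immediately follows the fc_bsg_request in the vendor
 * command area):
 *
 *    mbox_req = (struct dfc_mbox_req *)
 *            bsg_request->rqst_data.h_vendor.vendor_cmd;
 *
 * Older applications that omit the trailing extension fields are still
 * accepted; extMboxTag and extSeqNum are then forced to zero.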
4942 **/
4943 static int
4944 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4945 {
4946 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4947 struct fc_bsg_request *bsg_request = job->request;
4948 struct fc_bsg_reply *bsg_reply = job->reply;
4949 struct lpfc_hba *phba = vport->phba;
4950 struct dfc_mbox_req *mbox_req;
4951 int rc = 0;
4952 
4953 /* mix-and-match backward compatibility */
4954 bsg_reply->reply_payload_rcv_len = 0;
4955 if (job->request_len <
4956 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4957 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4958 "2737 Mix-and-match backward compatibility "
4959 "between MBOX_REQ old size:%d and "
4960 "new request size:%d\n",
4961 (int)(job->request_len -
4962 sizeof(struct fc_bsg_request)),
4963 (int)sizeof(struct dfc_mbox_req));
4964 mbox_req = (struct dfc_mbox_req *)
4965 bsg_request->rqst_data.h_vendor.vendor_cmd;
4966 mbox_req->extMboxTag = 0;
4967 mbox_req->extSeqNum = 0;
4968 }
4969 
4970 rc = lpfc_bsg_issue_mbox(phba, job, vport);
4971 
4972 if (rc == 0) {
4973 /* job done */
4974 bsg_reply->result = 0;
4975 job->dd_data = NULL;
4976 bsg_job_done(job, bsg_reply->result,
4977 bsg_reply->reply_payload_rcv_len);
4978 } else if (rc == 1)
4979 /* job submitted, will complete later*/
4980 rc = 0; /* return zero, no error */
4981 else {
4982 /* some error occurred */
4983 bsg_reply->result = rc;
4984 job->dd_data = NULL;
4985 }
4986 
4987 return rc;
4988 }
4989 
4990 /**
4991 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4992 * @phba: Pointer to HBA context object.
4993 * @cmdiocbq: Pointer to command iocb.
4994 * @rspiocbq: Pointer to response iocb.
4995 *
4996 * This function is the completion handler for iocbs issued using
4997 * the lpfc_menlo_cmd function. This function is called by the
4998 * ring event handler function without any lock held. This function
4999 * can be called from both worker thread context and interrupt
5000 * context. This function can also be called from another thread which
5001 * cleans up the SLI layer objects.
5002 * This function copies the contents of the response iocb to the
5003 * response iocb memory object provided by the caller of
5004 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
5005 * sleeps for the iocb completion.
5006 **/
5007 static void
5008 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5009 struct lpfc_iocbq *cmdiocbq,
5010 struct lpfc_iocbq *rspiocbq)
5011 {
5012 struct bsg_job_data *dd_data;
5013 struct bsg_job *job;
5014 struct fc_bsg_reply *bsg_reply;
5015 IOCB_t *rsp;
5016 struct lpfc_dmabuf *bmp, *cmp, *rmp;
5017 struct lpfc_bsg_menlo *menlo;
5018 unsigned long flags;
5019 struct menlo_response *menlo_resp;
5020 unsigned int rsp_size;
5021 int rc = 0;
5022 
5023 dd_data = cmdiocbq->context1;
5024 cmp = cmdiocbq->context2;
5025 bmp = cmdiocbq->context3;
5026 menlo = &dd_data->context_un.menlo;
5027 rmp = menlo->rmp;
5028 rsp = &rspiocbq->iocb;
5029 
5030 /* Determine if job has been aborted */
5031 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5032 job = dd_data->set_job;
5033 if (job) {
5034 bsg_reply = job->reply;
5035 /* Prevent timeout handling from trying to abort job */
5036 job->dd_data = NULL;
5037 }
5038 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5039 
5040 /* Copy the job data or set the failing status for the job */
5041 
5042 if (job) {
5043 /* always return the xri; it is used in the case
5044 * of a menlo download to allow the data to be sent as a
5045 * continuation of the exchange.
5046 */
5047 
5048 menlo_resp = (struct menlo_response *)
5049 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5050 menlo_resp->xri = rsp->ulpContext;
5051 if (rsp->ulpStatus) {
5052 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5053 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5054 case IOERR_SEQUENCE_TIMEOUT:
5055 rc = -ETIMEDOUT;
5056 break;
5057 case IOERR_INVALID_RPI:
5058 rc = -EFAULT;
5059 break;
5060 default:
5061 rc = -EACCES;
5062 break;
5063 }
5064 } else {
5065 rc = -EACCES;
5066 }
5067 } else {
5068 rsp_size = rsp->un.genreq64.bdl.bdeSize;
5069 bsg_reply->reply_payload_rcv_len =
5070 lpfc_bsg_copy_data(rmp, &job->reply_payload,
5071 rsp_size, 0);
5072 }
5073 
5074 }
5075 
5076 lpfc_sli_release_iocbq(phba, cmdiocbq);
5077 lpfc_free_bsg_buffers(phba, cmp);
5078 lpfc_free_bsg_buffers(phba, rmp);
5079 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5080 kfree(bmp);
5081 kfree(dd_data);
5082 
5083 /* Complete the job if active */
5084 
5085 if (job) {
5086 bsg_reply->result = rc;
5087 bsg_job_done(job, bsg_reply->result,
5088 bsg_reply->reply_payload_rcv_len);
5089 }
5090 
5091 return;
5092 }
5093 
5094 /**
5095 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5096 * @job: fc_bsg_job to handle
5097 *
5098 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
5099 * all command completions return the xri for the command.
5100 * For menlo data requests a gen request 64 CX is used to continue the exchange
5101 * supplied in the menlo request header xri field.
5102 **/
5103 static int
5104 lpfc_menlo_cmd(struct bsg_job *job)
5105 {
5106 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5107 struct fc_bsg_request *bsg_request = job->request;
5108 struct fc_bsg_reply *bsg_reply = job->reply;
5109 struct lpfc_hba *phba = vport->phba;
5110 struct lpfc_iocbq *cmdiocbq;
5111 IOCB_t *cmd;
5112 int rc = 0;
5113 struct menlo_command *menlo_cmd;
5114 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5115 int request_nseg;
5116 int reply_nseg;
5117 struct bsg_job_data *dd_data;
5118 struct ulp_bde64 *bpl = NULL;
5119 
5120 /* in case no data is returned, just return the return code */
5121 bsg_reply->reply_payload_rcv_len = 0;
5122 
5123 if (job->request_len <
5124 sizeof(struct fc_bsg_request) +
5125 sizeof(struct menlo_command)) {
5126 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5127 "2784 Received MENLO_CMD request below "
5128 "minimum size\n");
5129 rc = -ERANGE;
5130 goto no_dd_data;
5131 }
5132 
5133 if (job->reply_len <
5134 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
5135 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5136 "2785 Received MENLO_CMD reply below "
5137 "minimum size\n");
5138 rc = -ERANGE;
5139 goto no_dd_data;
5140 }
5141 
5142 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5143 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5144 "2786 Adapter does not support menlo "
5145 "commands\n");
5146 rc = -EPERM;
5147 goto no_dd_data;
5148 }
5149 
5150 menlo_cmd = (struct menlo_command *)
5151 bsg_request->rqst_data.h_vendor.vendor_cmd;
5152 
5153 /* allocate our bsg tracking structure */
5154 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5155 if (!dd_data) {
5156 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5157 "2787 Failed allocation of dd_data\n");
5158 rc = -ENOMEM;
5159 goto no_dd_data;
5160 }
5161 
5162 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5163 if (!bmp) {
5164 rc = -ENOMEM;
5165 goto free_dd;
5166 }
5167 
5168 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5169 if (!bmp->virt) {
5170 rc = -ENOMEM;
5171 goto free_bmp;
5172 }
5173 
5174 INIT_LIST_HEAD(&bmp->list);
5175 
5176 bpl = (struct ulp_bde64 *)bmp->virt;
5177 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5178 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5179 1, bpl, &request_nseg);
5180 if (!cmp) {
5181 rc = -ENOMEM;
5182 goto free_bmp;
5183 }
5184 lpfc_bsg_copy_data(cmp, &job->request_payload,
5185 job->request_payload.payload_len, 1);
5186 
5187 bpl += request_nseg;
5188 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5189 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5190 bpl, &reply_nseg);
5191 if (!rmp) {
5192 rc = -ENOMEM;
5193 goto free_cmp;
5194 }
5195 
5196 cmdiocbq = lpfc_sli_get_iocbq(phba);
5197 if (!cmdiocbq) {
5198 rc = -ENOMEM;
5199 goto free_rmp;
5200 }
5201 
5202 cmd = &cmdiocbq->iocb;
5203 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5204 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5205 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5206 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5207 cmd->un.genreq64.bdl.bdeSize =
5208 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5209 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5210 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5211 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5212 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5213 cmd->ulpBdeCount = 1;
5214 cmd->ulpClass = CLASS3;
5215 cmd->ulpOwner = OWN_CHIP;
5216 cmd->ulpLe = 1; /* Limited Edition */
5217 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5218 cmdiocbq->vport = phba->pport;
5219 /* We want the firmware to timeout before we do */
5220 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5221 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5222 cmdiocbq->context1 = dd_data;
5223 cmdiocbq->context2 = cmp;
5224 cmdiocbq->context3 = bmp;
5225 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5226 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5227 cmd->ulpPU = MENLO_PU; /* 3 */
5228 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5229 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5230 } else {
5231 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5232 cmd->ulpPU = 1;
5233 cmd->un.ulpWord[4] = 0;
5234 cmd->ulpContext = menlo_cmd->xri;
5235 }
5236 
5237 dd_data->type = TYPE_MENLO;
5238 dd_data->set_job = job;
5239 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5240 dd_data->context_un.menlo.rmp = rmp;
5241 job->dd_data = dd_data;
5242 
5243 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5244 MENLO_TIMEOUT - 5);
5245 if (rc == IOCB_SUCCESS)
5246 return 0; /* done for now */
5247 
5248 lpfc_sli_release_iocbq(phba, cmdiocbq);
5249 
5250 free_rmp:
5251 lpfc_free_bsg_buffers(phba, rmp);
5252 free_cmp:
5253 lpfc_free_bsg_buffers(phba, cmp);
5254 free_bmp:
5255 if (bmp->virt)
5256 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5257 kfree(bmp);
5258 free_dd:
5259 kfree(dd_data);
5260 no_dd_data:
5261 /* make error code available to userspace */
5262 bsg_reply->result = rc;
5263 job->dd_data = NULL;
5264 return rc;
5265 }
5266 
5267 static int
5268 lpfc_forced_link_speed(struct bsg_job *job)
5269 {
5270 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5271 struct lpfc_vport *vport = shost_priv(shost);
5272 struct lpfc_hba *phba = vport->phba;
5273 struct fc_bsg_reply *bsg_reply = job->reply;
5274 struct forced_link_speed_support_reply *forced_reply;
5275 int rc = 0;
5276 
5277 if (job->request_len <
5278 sizeof(struct fc_bsg_request) +
5279 sizeof(struct get_forced_link_speed_support)) {
5280 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5281 "0048 Received FORCED_LINK_SPEED request "
5282 "below minimum size\n");
5283 rc = -EINVAL;
5284 goto job_error;
5285 }
5286 
5287 forced_reply = (struct forced_link_speed_support_reply *)
5288 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5289 
5290 if (job->reply_len <
5291 sizeof(struct fc_bsg_request) +
5292 sizeof(struct forced_link_speed_support_reply)) {
5293 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5294 "0049 Received FORCED_LINK_SPEED reply below "
5295 "minimum size\n");
5296 rc = -EINVAL;
5297 goto job_error;
5298 }
5299 
5300 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5301 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5302 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5303 job_error:
5304 bsg_reply->result = rc;
5305 if (rc == 0)
5306 bsg_job_done(job, bsg_reply->result,
5307 bsg_reply->reply_payload_rcv_len);
5308 return rc;
5309 }
5310 
5311 /**
5312 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5313 * @job: fc_bsg_job to handle
5314 **/
5315 static int
5316 lpfc_bsg_hst_vendor(struct bsg_job *job)
5317 {
5318 struct fc_bsg_request *bsg_request = job->request;
5319 struct fc_bsg_reply *bsg_reply = job->reply;
5320 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5321 int rc;
5322 
5323 switch (command) {
5324 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5325 rc = lpfc_bsg_hba_set_event(job);
5326 break;
5327 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5328 rc = lpfc_bsg_hba_get_event(job);
5329 break;
5330 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5331 rc = lpfc_bsg_send_mgmt_rsp(job);
5332 break;
5333 case LPFC_BSG_VENDOR_DIAG_MODE:
5334 rc = lpfc_bsg_diag_loopback_mode(job);
5335 break;
5336 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5337 rc = lpfc_sli4_bsg_diag_mode_end(job);
5338 break;
5339 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5340 rc = lpfc_bsg_diag_loopback_run(job);
5341 break;
5342 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5343 rc = lpfc_sli4_bsg_link_diag_test(job);
5344 break;
5345 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5346 rc = lpfc_bsg_get_dfc_rev(job);
5347 break;
5348 case LPFC_BSG_VENDOR_MBOX:
5349 rc = lpfc_bsg_mbox_cmd(job);
5350 break;
5351 case LPFC_BSG_VENDOR_MENLO_CMD:
5352 case LPFC_BSG_VENDOR_MENLO_DATA:
5353 rc = lpfc_menlo_cmd(job);
5354 break;
5355 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5356 rc = lpfc_forced_link_speed(job);
5357 break;
5358 default:
5359 rc = -EINVAL;
5360 bsg_reply->reply_payload_rcv_len = 0;
5361 /* make error code available to userspace */
5362 bsg_reply->result = rc;
5363 break;
5364 }
5365 
5366 return rc;
5367 }
5368 
5369 /**
5370 * lpfc_bsg_request - handle a bsg request from the FC transport
5371 * @job: fc_bsg_job to handle
5372 **/
5373 int
5374 lpfc_bsg_request(struct bsg_job *job)
5375 {
5376 struct fc_bsg_request *bsg_request = job->request;
5377 struct fc_bsg_reply *bsg_reply = job->reply;
5378 uint32_t msgcode;
5379 int rc;
5380 
5381 msgcode = bsg_request->msgcode;
5382 switch (msgcode) {
5383 case FC_BSG_HST_VENDOR:
5384 rc = lpfc_bsg_hst_vendor(job);
5385 break;
5386 case FC_BSG_RPT_ELS:
5387 rc = lpfc_bsg_rport_els(job);
5388 break;
5389 case FC_BSG_RPT_CT:
5390 rc = lpfc_bsg_send_mgmt_cmd(job);
5391 break;
5392 default:
5393 rc = -EINVAL;
5394 bsg_reply->reply_payload_rcv_len = 0;
5395 /* make error code available to userspace */
5396 bsg_reply->result = rc;
5397 break;
5398 }
5399 
5400 return rc;
5401 }
5402 
5403 /**
5404 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5405 * @job: fc_bsg_job that has timed out
5406 *
5407 * This function just aborts the job's IOCB. The aborted IOCB will return to
5408 * the waiting function which will handle passing the error back to userspace.
5409 **/
5410 int
5411 lpfc_bsg_timeout(struct bsg_job *job)
5412 {
5413 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5414 struct lpfc_hba *phba = vport->phba;
5415 struct lpfc_iocbq *cmdiocb;
5416 struct lpfc_sli_ring *pring;
5417 struct bsg_job_data *dd_data;
5418 unsigned long flags;
5419 int rc = 0;
5420 LIST_HEAD(completions);
5421 struct lpfc_iocbq *check_iocb, *next_iocb;
5422 
5423 pring = lpfc_phba_elsring(phba);
5424 if (unlikely(!pring))
5425 return -EIO;
5426 
5427 /* if job's driver data is NULL, the command completed or is in the
5428 * process of completing. In this case, return a status to the request
5429 * so the timeout is retried. This avoids double completion issues
5430 * and the request will be pulled off the timer queue when the
5431 * command's completion handler executes. Otherwise, prevent the
5432 * command's completion handler from executing the job done callback
5433 * and continue processing to abort the outstanding command.
5434 */
5435 
5436 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5437 dd_data = (struct bsg_job_data *)job->dd_data;
5438 if (dd_data) {
5439 dd_data->set_job = NULL;
5440 job->dd_data = NULL;
5441 } else {
5442 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5443 return -EAGAIN;
5444 }
5445 
5446 switch (dd_data->type) {
5447 case TYPE_IOCB:
5448 /* Check to see if IOCB was issued to the port or not. If not,
5449 * remove it from the txq queue and call cancel iocbs.
5450 * Otherwise, call abort iotag
5451 */
5452 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5453 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5454 
5455 spin_lock_irqsave(&phba->hbalock, flags);
5456 /* make sure the I/O abort window is still open */
5457 if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
5458 spin_unlock_irqrestore(&phba->hbalock, flags);
5459 return -EAGAIN;
5460 }
5461 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5462 list) {
5463 if (check_iocb == cmdiocb) {
5464 list_move_tail(&check_iocb->list, &completions);
5465 break;
5466 }
5467 }
5468 if (list_empty(&completions))
5469 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5470 spin_unlock_irqrestore(&phba->hbalock, flags);
5471 if (!list_empty(&completions)) {
5472 lpfc_sli_cancel_iocbs(phba, &completions,
5473 IOSTAT_LOCAL_REJECT,
5474 IOERR_SLI_ABORTED);
5475 }
5476 break;
5477 
5478 case TYPE_EVT:
5479 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5480 break;
5481 
5482 case TYPE_MBOX:
5483 /* Update the ext buf ctx state if needed */
5484 
5485 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5486 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5487 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5488 break;
5489 case TYPE_MENLO:
5490 /* Check to see if IOCB was issued to the port or not. If not,
5491 * remove it from the txq queue and call cancel iocbs.
5492 * Otherwise, call abort iotag.
5493 */
5494 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5495 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5496 
5497 spin_lock_irqsave(&phba->hbalock, flags);
5498 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5499 list) {
5500 if (check_iocb == cmdiocb) {
5501 list_move_tail(&check_iocb->list, &completions);
5502 break;
5503 }
5504 }
5505 if (list_empty(&completions))
5506 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5507 spin_unlock_irqrestore(&phba->hbalock, flags);
5508 if (!list_empty(&completions)) {
5509 lpfc_sli_cancel_iocbs(phba, &completions,
5510 IOSTAT_LOCAL_REJECT,
5511 IOERR_SLI_ABORTED);
5512 }
5513 break;
5514 default:
5515 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5516 break;
5517 }
5518 
5519 /* The SCSI transport FC fc_bsg_job_timeout handler expects a zero
5520 * return code; otherwise an error message is displayed on the
5521 * console. So always return success (zero).
5522 */
5523 return rc;
5524 }
5525