/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
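
/*
 * Emulex vendor-unique CT loopback protocol used by the diagnostic
 * loopback test: SLI_CT_ELX_LOOPBACK below is the CT FsType that
 * identifies loopback frames, and enum ELX_LOOPBACK_CMD carries the
 * loopback command in the CT CommandResponse field.
 */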
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffers and place in the BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the CT response data, if any, from the receive
 * DMA buffers into the bsg job's reply payload, releases the iocb and
 * its buffers, and completes the job if it is still active.
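 * The timeout handler is prevented from aborting the job by clearing
 * job->dd_data under ct_ev_lock and by closing the LPFC_IO_CMD_OUTSTANDING
 * abort window under hbalock before any resources are released.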
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the ELS response into the bsg job's reply payload
 * (or translates an LS_RJT into the ctels reply data), releases the iocb,
 * and completes the job if it is still active.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the dma buffers allocated by the prep els iocb for the
	 * command and the response, so that if the job times out and the
	 * request is freed we will not be DMA'ing into memory that is no
	 * longer allocated for the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
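	/*
	 * context1 carried dd_data while the iocb was outstanding; restore
	 * the ndlp that lpfc_els_free_iocb() expects to find there before
	 * freeing the iocb.
	 */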
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited command iocb.
 *
 * This function is called when an unsolicited CT command is received.
 * It forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 for handled; otherwise, it returns 0
 * indicating that no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	kfree(evt_dat->data);
	kfree(evt_dat);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
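	/* Drop the reference taken when this event was matched above */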
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from other threads which
 * clean up the SLI layer objects.
 * This function sets the job's error status from the iocb completion,
 * releases the iocb resources, and completes the job if it is still
 * active.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
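 *
 * The response is transmitted with a CMD_XMIT_SEQUENCE64_CX iocb on the
 * exchange identified by @tag; on SLI4 ports the exchange must still be
 * marked UNSOL_VALID in the port's ct_ctx[] array or the request fails.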
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag
 * loopback on the device.
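 * All SCSI requests on the adapter's Scsi_Hosts are blocked, and the
 * function then waits for the FCP ring txcmplq to drain before the
 * caller manipulates the link.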
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing after
 * diag loopback mode has been set up on the device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
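 *
 * The mailbox sequence is roughly:
 *   MBX_DOWN_LINK                  - bring the link down
 *   wait for LPFC_LINK_DOWN        - poll link_state
 *   MBX_INIT_LINK (FLAGS_LOCAL_LB) - bring the link back up in loopback
 *   wait for LPFC_HBA_READY        - poll link_state again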
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for setting the link to diag or normal operation state.
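 *	A non-zero value requests the diagnostic state; zero requests a
 *	return to normal operation.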
1911 * 1912 * This function is responsible for issuing a sli4 mailbox command for setting 1913 * link to either diag state or normal operation state. 1914 */ 1915 static int 1916 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) 1917 { 1918 LPFC_MBOXQ_t *pmboxq; 1919 struct lpfc_mbx_set_link_diag_state *link_diag_state; 1920 uint32_t req_len, alloc_len; 1921 int mbxstatus = MBX_SUCCESS, rc; 1922 1923 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1924 if (!pmboxq) 1925 return -ENOMEM; 1926 1927 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 1928 sizeof(struct lpfc_sli4_cfg_mhdr)); 1929 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1930 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 1931 req_len, LPFC_SLI4_MBX_EMBED); 1932 if (alloc_len != req_len) { 1933 rc = -ENOMEM; 1934 goto link_diag_state_set_out; 1935 } 1936 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1937 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", 1938 diag, phba->sli4_hba.lnk_info.lnk_tp, 1939 phba->sli4_hba.lnk_info.lnk_no); 1940 1941 link_diag_state = &pmboxq->u.mqe.un.link_diag_state; 1942 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, 1943 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); 1944 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, 1945 phba->sli4_hba.lnk_info.lnk_no); 1946 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, 1947 phba->sli4_hba.lnk_info.lnk_tp); 1948 if (diag) 1949 bf_set(lpfc_mbx_set_diag_state_diag, 1950 &link_diag_state->u.req, 1); 1951 else 1952 bf_set(lpfc_mbx_set_diag_state_diag, 1953 &link_diag_state->u.req, 0); 1954 1955 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1956 1957 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) 1958 rc = 0; 1959 else 1960 rc = -ENODEV; 1961 1962 link_diag_state_set_out: 1963 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 1964 mempool_free(pmboxq, phba->mbox_mem_pool); 1965 1966 return rc; 1967 } 1968 1969 /** 1970 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic 1971 * @phba: Pointer to HBA context object. 1972 * 1973 * This function is responsible for issuing a sli4 mailbox command for setting 1974 * up internal loopback diagnostic. 
1975 */ 1976 static int 1977 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) 1978 { 1979 LPFC_MBOXQ_t *pmboxq; 1980 uint32_t req_len, alloc_len; 1981 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1982 int mbxstatus = MBX_SUCCESS, rc = 0; 1983 1984 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1985 if (!pmboxq) 1986 return -ENOMEM; 1987 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - 1988 sizeof(struct lpfc_sli4_cfg_mhdr)); 1989 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1990 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, 1991 req_len, LPFC_SLI4_MBX_EMBED); 1992 if (alloc_len != req_len) { 1993 mempool_free(pmboxq, phba->mbox_mem_pool); 1994 return -ENOMEM; 1995 } 1996 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; 1997 bf_set(lpfc_mbx_set_diag_state_link_num, 1998 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no); 1999 bf_set(lpfc_mbx_set_diag_state_link_type, 2000 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); 2001 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, 2002 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL); 2003 2004 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 2005 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { 2006 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2007 "3127 Failed setup loopback mode mailbox " 2008 "command, rc:x%x, status:x%x\n", mbxstatus, 2009 pmboxq->u.mb.mbxStatus); 2010 rc = -ENODEV; 2011 } 2012 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 2013 mempool_free(pmboxq, phba->mbox_mem_pool); 2014 return rc; 2015 } 2016 2017 /** 2018 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic 2019 * @phba: Pointer to HBA context object. 2020 * 2021 * This function sets up the SLI4 FC port registrations for a diagnostic run, 2022 * which includes the rpis, the vfi, and the vpi. 2023 */ 2024 static int 2025 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) 2026 { 2027 int rc; 2028 2029 if (phba->pport->fc_flag & FC_VFI_REGISTERED) { 2030 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2031 "3136 Port still had vfi registered: " 2032 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", 2033 phba->pport->fc_myDID, phba->fcf.fcfi, 2034 phba->sli4_hba.vfi_ids[phba->pport->vfi], 2035 phba->vpi_ids[phba->pport->vpi]); 2036 return -EINVAL; 2037 } 2038 rc = lpfc_issue_reg_vfi(phba->pport); 2039 return rc; 2040 } 2041 2042 /** 2043 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command 2044 * @phba: Pointer to HBA context object. 2045 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2046 * 2047 * This function is responsible for placing an sli4 port into diagnostic 2048 * loopback mode in order to perform a diagnostic loopback test.
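* A minimal user-space sketch of driving this path (illustrative only, not a supported API; the delivery mechanism is deliberately omitted and the field layout assumed is the struct diag_mode_set defined in lpfc_bsg.h): fill cmd.command = LPFC_BSG_VENDOR_DIAG_MODE, cmd.type = INTERNAL_LOOP_BACK (or EXTERNAL_LOOP_BACK), cmd.timeout = 60 (seconds), then submit the fc_bsg_request carrying cmd through the fc_host's bsg node.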
2049 */ 2050 static int 2051 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) 2052 { 2053 struct fc_bsg_request *bsg_request = job->request; 2054 struct fc_bsg_reply *bsg_reply = job->reply; 2055 struct diag_mode_set *loopback_mode; 2056 uint32_t link_flags, timeout; 2057 int i, rc = 0; 2058 2059 /* no data to return just the return code */ 2060 bsg_reply->reply_payload_rcv_len = 0; 2061 2062 if (job->request_len < sizeof(struct fc_bsg_request) + 2063 sizeof(struct diag_mode_set)) { 2064 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2065 "3011 Received DIAG MODE request size:%d " 2066 "below the minimum size:%d\n", 2067 job->request_len, 2068 (int)(sizeof(struct fc_bsg_request) + 2069 sizeof(struct diag_mode_set))); 2070 rc = -EINVAL; 2071 goto job_error; 2072 } 2073 2074 rc = lpfc_bsg_diag_mode_enter(phba); 2075 if (rc) 2076 goto job_error; 2077 2078 /* indicate we are in loopback diagnostic mode */ 2079 spin_lock_irq(&phba->hbalock); 2080 phba->link_flag |= LS_LOOPBACK_MODE; 2081 spin_unlock_irq(&phba->hbalock); 2082 2083 /* reset port to start from scratch */ 2084 rc = lpfc_selective_reset(phba); 2085 if (rc) 2086 goto job_error; 2087 2088 /* bring the link to diagnostic mode */ 2089 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2090 "3129 Bring link to diagnostic state.\n"); 2091 loopback_mode = (struct diag_mode_set *) 2092 bsg_request->rqst_data.h_vendor.vendor_cmd; 2093 link_flags = loopback_mode->type; 2094 timeout = loopback_mode->timeout * 100; 2095 2096 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2097 if (rc) { 2098 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2099 "3130 Failed to bring link to diagnostic " 2100 "state, rc:x%x\n", rc); 2101 goto loopback_mode_exit; 2102 } 2103 2104 /* wait for link down before proceeding */ 2105 i = 0; 2106 while (phba->link_state != LPFC_LINK_DOWN) { 2107 if (i++ > timeout) { 2108 rc = -ETIMEDOUT; 2109 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2110 "3131 Timeout waiting for link to " 2111 "diagnostic mode, timeout:%d ms\n", 2112 timeout * 10); 2113 goto loopback_mode_exit; 2114 } 2115 msleep(10); 2116 } 2117 2118 /* set up loopback mode */ 2119 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2120 "3132 Set up loopback mode:x%x\n", link_flags); 2121 2122 if (link_flags == INTERNAL_LOOP_BACK) 2123 rc = lpfc_sli4_bsg_set_internal_loopback(phba); 2124 else if (link_flags == EXTERNAL_LOOP_BACK) 2125 rc = lpfc_hba_init_link_fc_topology(phba, 2126 FLAGS_TOPOLOGY_MODE_PT_PT, 2127 MBX_NOWAIT); 2128 else { 2129 rc = -EINVAL; 2130 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2131 "3141 Loopback mode:x%x not supported\n", 2132 link_flags); 2133 goto loopback_mode_exit; 2134 } 2135 2136 if (!rc) { 2137 /* wait for the link attention interrupt */ 2138 msleep(100); 2139 i = 0; 2140 while (phba->link_state < LPFC_LINK_UP) { 2141 if (i++ > timeout) { 2142 rc = -ETIMEDOUT; 2143 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2144 "3137 Timeout waiting for link up " 2145 "in loopback mode, timeout:%d ms\n", 2146 timeout * 10); 2147 break; 2148 } 2149 msleep(10); 2150 } 2151 } 2152 2153 /* port resource registration setup for loopback diagnostic */ 2154 if (!rc) { 2155 /* set up a non-zero myDID for loopback test */ 2156 phba->pport->fc_myDID = 1; 2157 rc = lpfc_sli4_diag_fcport_reg_setup(phba); 2158 } else 2159 goto loopback_mode_exit; 2160 2161 if (!rc) { 2162 /* wait for the port ready */ 2163 msleep(100); 2164 i = 0; 2165 while (phba->link_state != LPFC_HBA_READY) { 2166 if (i++ > timeout) { 2167 rc = -ETIMEDOUT; 2168
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2169 "3133 Timeout waiting for port " 2170 "loopback mode ready, timeout:%d ms\n", 2171 timeout * 10); 2172 break; 2173 } 2174 msleep(10); 2175 } 2176 } 2177 2178 loopback_mode_exit: 2179 /* clear loopback diagnostic mode */ 2180 if (rc) { 2181 spin_lock_irq(&phba->hbalock); 2182 phba->link_flag &= ~LS_LOOPBACK_MODE; 2183 spin_unlock_irq(&phba->hbalock); 2184 } 2185 lpfc_bsg_diag_mode_exit(phba); 2186 2187 job_error: 2188 /* make error code available to userspace */ 2189 bsg_reply->result = rc; 2190 /* complete the job back to userspace if no error */ 2191 if (rc == 0) 2192 bsg_job_done(job, bsg_reply->result, 2193 bsg_reply->reply_payload_rcv_len); 2194 return rc; 2195 } 2196 2197 /** 2198 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode 2199 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2200 * 2201 * This function is responsible for checking and dispatching the bsg diag 2202 * command from the user to the proper driver action routine. 2203 */ 2204 static int 2205 lpfc_bsg_diag_loopback_mode(struct bsg_job *job) 2206 { 2207 struct Scsi_Host *shost; 2208 struct lpfc_vport *vport; 2209 struct lpfc_hba *phba; 2210 int rc; 2211 2212 shost = fc_bsg_to_shost(job); 2213 if (!shost) 2214 return -ENODEV; 2215 vport = shost_priv(shost); 2216 if (!vport) 2217 return -ENODEV; 2218 phba = vport->phba; 2219 if (!phba) 2220 return -ENODEV; 2221 2222 if (phba->sli_rev < LPFC_SLI_REV4) 2223 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); 2224 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 2225 LPFC_SLI_INTF_IF_TYPE_2) 2226 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); 2227 else 2228 rc = -ENODEV; 2229 2230 return rc; 2231 } 2232 2233 /** 2234 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode 2235 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END 2236 * 2237 * This function is responsible for checking and dispatching the bsg diag 2238 * command from the user to the proper driver action routine.
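* Return: 0 when diag state is cleared and the port has been selectively reset; -ENODEV on pre-SLI4 or non-IF_TYPE_2 hardware; otherwise the error from clearing the link diag state (a summary of the code below).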
2239 */ 2240 static int 2241 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) 2242 { 2243 struct fc_bsg_request *bsg_request = job->request; 2244 struct fc_bsg_reply *bsg_reply = job->reply; 2245 struct Scsi_Host *shost; 2246 struct lpfc_vport *vport; 2247 struct lpfc_hba *phba; 2248 struct diag_mode_set *loopback_mode_end_cmd; 2249 uint32_t timeout; 2250 int rc, i; 2251 2252 shost = fc_bsg_to_shost(job); 2253 if (!shost) 2254 return -ENODEV; 2255 vport = shost_priv(shost); 2256 if (!vport) 2257 return -ENODEV; 2258 phba = vport->phba; 2259 if (!phba) 2260 return -ENODEV; 2261 2262 if (phba->sli_rev < LPFC_SLI_REV4) 2263 return -ENODEV; 2264 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2265 LPFC_SLI_INTF_IF_TYPE_2) 2266 return -ENODEV; 2267 2268 /* clear loopback diagnostic mode */ 2269 spin_lock_irq(&phba->hbalock); 2270 phba->link_flag &= ~LS_LOOPBACK_MODE; 2271 spin_unlock_irq(&phba->hbalock); 2272 loopback_mode_end_cmd = (struct diag_mode_set *) 2273 bsg_request->rqst_data.h_vendor.vendor_cmd; 2274 timeout = loopback_mode_end_cmd->timeout * 100; 2275 2276 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2277 if (rc) { 2278 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2279 "3139 Failed to bring link out of diagnostic " 2280 "state, rc:x%x\n", rc); 2281 goto loopback_mode_end_exit; 2282 } 2283 2284 /* wait for link down before proceeding */ 2285 i = 0; 2286 while (phba->link_state != LPFC_LINK_DOWN) { 2287 if (i++ > timeout) { 2288 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2289 "3140 Timeout waiting for link to " 2290 "leave diagnostic mode, timeout:%d ms\n", 2291 timeout * 10); 2292 /* there is nothing much we can do here */ 2293 break; 2294 } 2295 msleep(10); 2296 } 2297 2298 /* reset port resource registrations */ 2299 rc = lpfc_selective_reset(phba); 2300 phba->pport->fc_myDID = 0; 2301 2302 loopback_mode_end_exit: 2303 /* make return code available to userspace */ 2304 bsg_reply->result = rc; 2305 /* complete the job back to userspace if no error */ 2306 if (rc == 0) 2307 bsg_job_done(job, bsg_reply->result, 2308 bsg_reply->reply_payload_rcv_len); 2309 return rc; 2310 } 2311 2312 /** 2313 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test 2314 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST 2315 * 2316 * This function performs an SLI4 diag link test as requested by the user 2317 * application.
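* The vendor command payload is a struct sli4_link_diag; its test_id, loops, test_version and error_action fields are programmed into the run_link_diag_test mailbox request via the bf_set() calls below.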
2318 */ 2319 static int 2320 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) 2321 { 2322 struct fc_bsg_request *bsg_request = job->request; 2323 struct fc_bsg_reply *bsg_reply = job->reply; 2324 struct Scsi_Host *shost; 2325 struct lpfc_vport *vport; 2326 struct lpfc_hba *phba; 2327 LPFC_MBOXQ_t *pmboxq; 2328 struct sli4_link_diag *link_diag_test_cmd; 2329 uint32_t req_len, alloc_len; 2330 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2331 union lpfc_sli4_cfg_shdr *shdr; 2332 uint32_t shdr_status, shdr_add_status; 2333 struct diag_status *diag_status_reply; 2334 int mbxstatus, rc = 0; 2335 2336 shost = fc_bsg_to_shost(job); 2337 if (!shost) { 2338 rc = -ENODEV; 2339 goto job_error; 2340 } 2341 vport = shost_priv(shost); 2342 if (!vport) { 2343 rc = -ENODEV; 2344 goto job_error; 2345 } 2346 phba = vport->phba; 2347 if (!phba) { 2348 rc = -ENODEV; 2349 goto job_error; 2350 } 2351 2352 if (phba->sli_rev < LPFC_SLI_REV4) { 2353 rc = -ENODEV; 2354 goto job_error; 2355 } 2356 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2357 LPFC_SLI_INTF_IF_TYPE_2) { 2358 rc = -ENODEV; 2359 goto job_error; 2360 } 2361 2362 if (job->request_len < sizeof(struct fc_bsg_request) + 2363 sizeof(struct sli4_link_diag)) { 2364 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2365 "3013 Received LINK DIAG TEST request " 2366 " size:%d below the minimum size:%d\n", 2367 job->request_len, 2368 (int)(sizeof(struct fc_bsg_request) + 2369 sizeof(struct sli4_link_diag))); 2370 rc = -EINVAL; 2371 goto job_error; 2372 } 2373 2374 rc = lpfc_bsg_diag_mode_enter(phba); 2375 if (rc) 2376 goto job_error; 2377 2378 link_diag_test_cmd = (struct sli4_link_diag *) 2379 bsg_request->rqst_data.h_vendor.vendor_cmd; 2380 2381 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2382 2383 if (rc) 2384 goto job_error; 2385 2386 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2387 if (!pmboxq) { 2388 rc = -ENOMEM; 2389 goto link_diag_test_exit; 2390 } 2391 2392 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 2393 sizeof(struct lpfc_sli4_cfg_mhdr)); 2394 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2395 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 2396 req_len, LPFC_SLI4_MBX_EMBED); 2397 if (alloc_len != req_len) { 2398 rc = -ENOMEM; 2399 goto link_diag_test_exit; 2400 } 2401 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; 2402 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, 2403 phba->sli4_hba.lnk_info.lnk_no); 2404 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, 2405 phba->sli4_hba.lnk_info.lnk_tp); 2406 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, 2407 link_diag_test_cmd->test_id); 2408 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, 2409 link_diag_test_cmd->loops); 2410 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, 2411 link_diag_test_cmd->test_version); 2412 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, 2413 link_diag_test_cmd->error_action); 2414 2415 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2416 2417 shdr = (union lpfc_sli4_cfg_shdr *) 2418 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; 2419 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2420 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2421 if (shdr_status || shdr_add_status || mbxstatus) { 2422 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2423 "3010 Run link diag test mailbox failed with " 2424 "mbx_status x%x status x%x, add_status x%x\n", 2425 
mbxstatus, shdr_status, shdr_add_status); 2426 } 2427 2428 diag_status_reply = (struct diag_status *) 2429 bsg_reply->reply_data.vendor_reply.vendor_rsp; 2430 2431 if (job->reply_len < 2432 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { 2433 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2434 "3012 Received Run link diag test reply " 2435 "below minimum size (%d): reply_len:%d\n", 2436 (int)(sizeof(struct fc_bsg_request) + 2437 sizeof(struct diag_status)), 2438 job->reply_len); 2439 rc = -EINVAL; 2440 goto link_diag_test_exit; 2441 } 2442 2443 diag_status_reply->mbox_status = mbxstatus; 2444 diag_status_reply->shdr_status = shdr_status; 2445 diag_status_reply->shdr_add_status = shdr_add_status; 2446 2447 link_diag_test_exit: 2448 /* bring link out of diag state; don't let cleanup mask an earlier error */ if (lpfc_sli4_bsg_set_link_diag_state(phba, 0) && !rc) rc = -ENODEV; 2449 2450 if (pmboxq) 2451 mempool_free(pmboxq, phba->mbox_mem_pool); 2452 2453 lpfc_bsg_diag_mode_exit(phba); 2454 2455 job_error: 2456 /* make error code available to userspace */ 2457 bsg_reply->result = rc; 2458 /* complete the job back to userspace if no error */ 2459 if (rc == 0) 2460 bsg_job_done(job, bsg_reply->result, 2461 bsg_reply->reply_payload_rcv_len); 2462 return rc; 2463 } 2464 2465 /** 2466 * lpfcdiag_loop_self_reg - obtains a remote port login id 2467 * @phba: Pointer to HBA context object 2468 * @rpi: Pointer to a remote port login id 2469 * 2470 * This function obtains a remote port login id so the diag loopback test 2471 * can send and receive its own unsolicited CT command. 2472 **/ 2473 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 2474 { 2475 LPFC_MBOXQ_t *mbox; 2476 struct lpfc_dmabuf *dmabuff; 2477 int status; 2478 2479 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2480 if (!mbox) 2481 return -ENOMEM; 2482 2483 if (phba->sli_rev < LPFC_SLI_REV4) 2484 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 2485 (uint8_t *)&phba->pport->fc_sparam, 2486 mbox, *rpi); 2487 else { 2488 *rpi = lpfc_sli4_alloc_rpi(phba); 2489 if (*rpi == LPFC_RPI_ALLOC_ERROR) { 2490 mempool_free(mbox, phba->mbox_mem_pool); 2491 return -EBUSY; 2492 } 2493 status = lpfc_reg_rpi(phba, phba->pport->vpi, 2494 phba->pport->fc_myDID, 2495 (uint8_t *)&phba->pport->fc_sparam, 2496 mbox, *rpi); 2497 } 2498 2499 if (status) { 2500 mempool_free(mbox, phba->mbox_mem_pool); 2501 if (phba->sli_rev == LPFC_SLI_REV4) 2502 lpfc_sli4_free_rpi(phba, *rpi); 2503 return -ENOMEM; 2504 } 2505 2506 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 2507 mbox->context1 = NULL; 2508 mbox->context2 = NULL; 2509 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2510 2511 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2512 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2513 kfree(dmabuff); 2514 if (status != MBX_TIMEOUT) 2515 mempool_free(mbox, phba->mbox_mem_pool); 2516 if (phba->sli_rev == LPFC_SLI_REV4) 2517 lpfc_sli4_free_rpi(phba, *rpi); 2518 return -ENODEV; 2519 } 2520 2521 if (phba->sli_rev < LPFC_SLI_REV4) 2522 *rpi = mbox->u.mb.un.varWords[0]; 2523 2524 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2525 kfree(dmabuff); 2526 mempool_free(mbox, phba->mbox_mem_pool); 2527 return 0; 2528 } 2529 2530 /** 2531 * lpfcdiag_loop_self_unreg - unregisters the rpi 2532 * @phba: Pointer to HBA context object 2533 * @rpi: Remote port login id 2534 * 2535 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 2536 **/ 2537 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 2538 { 2539 LPFC_MBOXQ_t *mbox; 2540 int status; 2541 2542 /* Allocate
mboxq structure */ 2543 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2544 if (mbox == NULL) 2545 return -ENOMEM; 2546 2547 if (phba->sli_rev < LPFC_SLI_REV4) 2548 lpfc_unreg_login(phba, 0, rpi, mbox); 2549 else 2550 lpfc_unreg_login(phba, phba->pport->vpi, 2551 phba->sli4_hba.rpi_ids[rpi], mbox); 2552 2553 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2554 2555 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2556 if (status != MBX_TIMEOUT) 2557 mempool_free(mbox, phba->mbox_mem_pool); 2558 return -EIO; 2559 } 2560 mempool_free(mbox, phba->mbox_mem_pool); 2561 if (phba->sli_rev == LPFC_SLI_REV4) 2562 lpfc_sli4_free_rpi(phba, rpi); 2563 return 0; 2564 } 2565 2566 /** 2567 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids 2568 * @phba: Pointer to HBA context object 2569 * @rpi: Remote port login id 2570 * @txxri: Pointer to transmit exchange id 2571 * @rxxri: Pointer to response exchange id 2572 * 2573 * This function obtains the transmit and receive ids required to send 2574 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp 2575 * flags are used so the unsolicited response handler is able to process 2576 * the ct command sent on the same port. 2577 **/ 2578 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, 2579 uint16_t *txxri, uint16_t *rxxri) 2580 { 2581 struct lpfc_bsg_event *evt; 2582 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2583 IOCB_t *cmd, *rsp; 2584 struct lpfc_dmabuf *dmabuf; 2585 struct ulp_bde64 *bpl = NULL; 2586 struct lpfc_sli_ct_request *ctreq = NULL; 2587 int ret_val = 0; 2588 int time_left; 2589 int iocb_stat = IOCB_SUCCESS; 2590 unsigned long flags; 2591 2592 *txxri = 0; 2593 *rxxri = 0; 2594 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2595 SLI_CT_ELX_LOOPBACK); 2596 if (!evt) 2597 return -ENOMEM; 2598 2599 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2600 list_add(&evt->node, &phba->ct_ev_waiters); 2601 lpfc_bsg_event_ref(evt); 2602 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2603 2604 cmdiocbq = lpfc_sli_get_iocbq(phba); 2605 rspiocbq = lpfc_sli_get_iocbq(phba); 2606 2607 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2608 if (dmabuf) { 2609 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); 2610 if (dmabuf->virt) { 2611 INIT_LIST_HEAD(&dmabuf->list); 2612 bpl = (struct ulp_bde64 *) dmabuf->virt; 2613 memset(bpl, 0, sizeof(*bpl)); 2614 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); 2615 bpl->addrHigh = 2616 le32_to_cpu(putPaddrHigh(dmabuf->phys + 2617 sizeof(*bpl))); 2618 bpl->addrLow = 2619 le32_to_cpu(putPaddrLow(dmabuf->phys + 2620 sizeof(*bpl))); 2621 bpl->tus.f.bdeFlags = 0; 2622 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; 2623 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2624 } 2625 } 2626 2627 if (cmdiocbq == NULL || rspiocbq == NULL || 2628 dmabuf == NULL || bpl == NULL || ctreq == NULL || 2629 dmabuf->virt == NULL) { 2630 ret_val = -ENOMEM; 2631 goto err_get_xri_exit; 2632 } 2633 2634 cmd = &cmdiocbq->iocb; 2635 rsp = &rspiocbq->iocb; 2636 2637 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2638 2639 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2640 ctreq->RevisionId.bits.InId = 0; 2641 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2642 ctreq->FsSubType = 0; 2643 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; 2644 ctreq->CommandResponse.bits.Size = 0; 2645 2646 2647 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); 2648 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); 2649 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2650 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); 2651 2652 cmd->un.xseq64.w5.hcsw.Fctl = LA; 2653 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 2654 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 2655 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 2656 2657 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; 2658 cmd->ulpBdeCount = 1; 2659 cmd->ulpLe = 1; 2660 cmd->ulpClass = CLASS3; 2661 cmd->ulpContext = rpi; 2662 2663 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2664 cmdiocbq->vport = phba->pport; 2665 cmdiocbq->iocb_cmpl = NULL; 2666 2667 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2668 rspiocbq, 2669 (phba->fc_ratov * 2) 2670 + LPFC_DRVR_TIMEOUT); 2671 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { 2672 ret_val = -EIO; 2673 goto err_get_xri_exit; 2674 } 2675 *txxri = rsp->ulpContext; 2676 2677 evt->waiting = 1; 2678 evt->wait_time_stamp = jiffies; 2679 time_left = wait_event_interruptible_timeout( 2680 evt->wq, !list_empty(&evt->events_to_see), 2681 msecs_to_jiffies(1000 * 2682 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 2683 if (list_empty(&evt->events_to_see)) 2684 ret_val = (time_left) ? -EINTR : -ETIMEDOUT; 2685 else { 2686 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2687 list_move(evt->events_to_see.prev, &evt->events_to_get); 2688 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2689 *rxxri = (list_entry(evt->events_to_get.prev, 2690 typeof(struct event_data), 2691 node))->immed_dat; 2692 } 2693 evt->waiting = 0; 2694 2695 err_get_xri_exit: 2696 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2697 lpfc_bsg_event_unref(evt); /* release ref */ 2698 lpfc_bsg_event_unref(evt); /* delete */ 2699 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2700 2701 if (dmabuf) { 2702 if (dmabuf->virt) 2703 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2704 kfree(dmabuf); 2705 } 2706 2707 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) 2708 lpfc_sli_release_iocbq(phba, cmdiocbq); 2709 if (rspiocbq) 2710 lpfc_sli_release_iocbq(phba, rspiocbq); 2711 return ret_val; 2712 } 2713 2714 /** 2715 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer 2716 * @phba: Pointer to HBA context object 2717 * 2718 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and 2719 * returns a pointer to it. 2720 **/ 2721 static struct lpfc_dmabuf * 2722 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) 2723 { 2724 struct lpfc_dmabuf *dmabuf; 2725 struct pci_dev *pcidev = phba->pcidev; 2726 2727 /* allocate dma buffer struct */ 2728 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2729 if (!dmabuf) 2730 return NULL; 2731 2732 INIT_LIST_HEAD(&dmabuf->list); 2733 2734 /* now, allocate dma buffer */ 2735 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2736 &(dmabuf->phys), GFP_KERNEL); 2737 2738 if (!dmabuf->virt) { 2739 kfree(dmabuf); 2740 return NULL; 2741 } 2742 2743 return dmabuf; 2744 } 2745 2746 /** 2747 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer 2748 * @phba: Pointer to HBA context object. 2749 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor. 2750 * 2751 * This routine frees the dma buffer and its associated buffer descriptor 2752 * referred to by @dmabuf.
2753 **/ 2754 static void 2755 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf) 2756 { 2757 struct pci_dev *pcidev = phba->pcidev; 2758 2759 if (!dmabuf) 2760 return; 2761 2762 if (dmabuf->virt) 2763 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2764 dmabuf->virt, dmabuf->phys); 2765 kfree(dmabuf); 2766 return; 2767 } 2768 2769 /** 2770 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers 2771 * @phba: Pointer to HBA context object. 2772 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs. 2773 * 2774 * This routine frees all dma buffers and their associated buffer descriptors 2775 * referred to by @dmabuf_list. 2776 **/ 2777 static void 2778 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba, 2779 struct list_head *dmabuf_list) 2780 { 2781 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 2782 2783 if (list_empty(dmabuf_list)) 2784 return; 2785 2786 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) { 2787 list_del_init(&dmabuf->list); 2788 lpfc_bsg_dma_page_free(phba, dmabuf); 2789 } 2790 return; 2791 } 2792 2793 /** 2794 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2795 * @phba: Pointer to HBA context object 2796 * @bpl: Pointer to 64 bit bde structure 2797 * @size: Number of bytes to process 2798 * @nocopydata: Flag that, when set, leaves the buffers unzeroed for raw DMA use 2799 * 2800 * This function allocates page size buffers and populates an lpfc_dmabufext. 2801 * Unless @nocopydata is set, each buffer is zeroed so the caller can copy 2802 * its data in afterwards. The chained list of page size buffers is returned. 2803 **/ 2804 static struct lpfc_dmabufext * 2805 diag_cmd_data_alloc(struct lpfc_hba *phba, 2806 struct ulp_bde64 *bpl, uint32_t size, 2807 int nocopydata) 2808 { 2809 struct lpfc_dmabufext *mlist = NULL; 2810 struct lpfc_dmabufext *dmp; 2811 int cnt, offset = 0, i = 0; 2812 struct pci_dev *pcidev; 2813 2814 pcidev = phba->pcidev; 2815 2816 while (size) { 2817 /* We get chunks of 4K */ 2818 if (size > BUF_SZ_4K) 2819 cnt = BUF_SZ_4K; 2820 else 2821 cnt = size; 2822 2823 /* allocate struct lpfc_dmabufext buffer header */ 2824 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); 2825 if (!dmp) 2826 goto out; 2827 2828 INIT_LIST_HEAD(&dmp->dma.list); 2829 2830 /* Queue it to a linked list */ 2831 if (mlist) 2832 list_add_tail(&dmp->dma.list, &mlist->dma.list); 2833 else 2834 mlist = dmp; 2835 2836 /* allocate buffer */ 2837 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, 2838 cnt, 2839 &(dmp->dma.phys), 2840 GFP_KERNEL); 2841 2842 if (!dmp->dma.virt) 2843 goto out; 2844 2845 dmp->size = cnt; 2846 2847 if (nocopydata) { 2848 bpl->tus.f.bdeFlags = 0; 2849 pci_dma_sync_single_for_device(phba->pcidev, 2850 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); 2851 2852 } else { 2853 memset((uint8_t *)dmp->dma.virt, 0, cnt); 2854 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 2855 } 2856 2857 /* build buffer ptr list for IOCB */ 2858 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); 2859 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); 2860 bpl->tus.f.bdeSize = (ushort) cnt; 2861 bpl->tus.w = le32_to_cpu(bpl->tus.w); 2862 bpl++; 2863 2864 i++; 2865 offset += cnt; 2866 size -= cnt; 2867 } 2868 2869 if (mlist) { 2870 mlist->flag = i; 2871 return mlist; 2872 } 2873 out: 2874 diag_cmd_data_free(phba, mlist); 2875 return NULL; 2876 } 2877 2878 /** 2879 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd 2880 * @phba: Pointer to HBA context object 2881 * @rxxri: Receive
exchange id 2882 * @len: Number of data bytes 2883 * 2884 * This function allocates and posts a data buffer of sufficient size to receive 2885 * an unsolicited CT command. 2886 **/ 2887 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 2888 size_t len) 2889 { 2890 struct lpfc_sli_ring *pring; 2891 struct lpfc_iocbq *cmdiocbq; 2892 IOCB_t *cmd = NULL; 2893 struct list_head head, *curr, *next; 2894 struct lpfc_dmabuf *rxbmp; 2895 struct lpfc_dmabuf *dmp; 2896 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 2897 struct ulp_bde64 *rxbpl = NULL; 2898 uint32_t num_bde; 2899 struct lpfc_dmabufext *rxbuffer = NULL; 2900 int ret_val = 0; 2901 int iocb_stat; 2902 int i = 0; 2903 2904 pring = lpfc_phba_elsring(phba); 2905 2906 cmdiocbq = lpfc_sli_get_iocbq(phba); 2907 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2908 if (rxbmp != NULL) { 2909 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2910 if (rxbmp->virt) { 2911 INIT_LIST_HEAD(&rxbmp->list); 2912 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2913 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 2914 } 2915 } 2916 2917 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 2918 ret_val = -ENOMEM; 2919 goto err_post_rxbufs_exit; 2920 } 2921 2922 /* Queue buffers for the receive exchange */ 2923 num_bde = (uint32_t)rxbuffer->flag; 2924 dmp = &rxbuffer->dma; 2925 2926 cmd = &cmdiocbq->iocb; 2927 i = 0; 2928 2929 INIT_LIST_HEAD(&head); 2930 list_add_tail(&head, &dmp->list); 2931 list_for_each_safe(curr, next, &head) { 2932 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 2933 list_del(curr); 2934 2935 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2936 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 2937 cmd->un.quexri64cx.buff.bde.addrHigh = 2938 putPaddrHigh(mp[i]->phys); 2939 cmd->un.quexri64cx.buff.bde.addrLow = 2940 putPaddrLow(mp[i]->phys); 2941 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2942 ((struct lpfc_dmabufext *)mp[i])->size; 2943 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2944 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2945 cmd->ulpPU = 0; 2946 cmd->ulpLe = 1; 2947 cmd->ulpBdeCount = 1; 2948 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2949 2950 } else { 2951 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2952 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2953 cmd->un.cont64[i].tus.f.bdeSize = 2954 ((struct lpfc_dmabufext *)mp[i])->size; 2955 cmd->ulpBdeCount = ++i; 2956 2957 if ((--num_bde > 0) && (i < 2)) 2958 continue; 2959 2960 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2961 cmd->ulpLe = 1; 2962 } 2963 2964 cmd->ulpClass = CLASS3; 2965 cmd->ulpContext = rxxri; 2966 2967 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2968 0); 2969 if (iocb_stat == IOCB_ERROR) { 2970 diag_cmd_data_free(phba, 2971 (struct lpfc_dmabufext *)mp[0]); 2972 if (mp[1]) 2973 diag_cmd_data_free(phba, 2974 (struct lpfc_dmabufext *)mp[1]); 2975 dmp = list_entry(next, struct lpfc_dmabuf, list); 2976 ret_val = -EIO; 2977 goto err_post_rxbufs_exit; 2978 } 2979 2980 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2981 if (mp[1]) { 2982 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2983 mp[1] = NULL; 2984 } 2985 2986 /* The iocb was freed by lpfc_sli_issue_iocb */ 2987 cmdiocbq = lpfc_sli_get_iocbq(phba); 2988 if (!cmdiocbq) { 2989 dmp = list_entry(next, struct lpfc_dmabuf, list); 2990 ret_val = -EIO; 2991 goto err_post_rxbufs_exit; 2992 } 2993 2994 cmd = &cmdiocbq->iocb; 2995 i = 0; 2996 } 2997 list_del(&head); 2998 2999 err_post_rxbufs_exit: 3000 3001 if (rxbmp) { 3002 if (rxbmp->virt)
3003 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 3004 kfree(rxbmp); 3005 } 3006 3007 if (cmdiocbq) 3008 lpfc_sli_release_iocbq(phba, cmdiocbq); 3009 return ret_val; 3010 } 3011 3012 /** 3013 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself 3014 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 3015 * 3016 * This function receives a user data buffer to be transmitted and received on 3017 * the same port; the link must be up and in loopback mode prior 3018 * to being called. 3019 * 1. A kernel buffer is allocated to copy the user data into. 3020 * 2. The port registers with "itself". 3021 * 3. The transmit and receive exchange ids are obtained. 3022 * 4. The receive exchange id is posted. 3023 * 5. A new els loopback event is created. 3024 * 6. The command and response iocbs are allocated. 3025 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback. 3026 * 3027 * This function is meant to be called n times while the port is in loopback 3028 * so it is the app's responsibility to issue a reset to take the port out 3029 * of loopback mode. 3030 **/ 3031 static int 3032 lpfc_bsg_diag_loopback_run(struct bsg_job *job) 3033 { 3034 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3035 struct fc_bsg_reply *bsg_reply = job->reply; 3036 struct lpfc_hba *phba = vport->phba; 3037 struct lpfc_bsg_event *evt; 3038 struct event_data *evdat; 3039 struct lpfc_sli *psli = &phba->sli; 3040 uint32_t size; 3041 uint32_t full_size; 3042 size_t segment_len = 0, segment_offset = 0, current_offset = 0; 3043 uint16_t rpi = 0; 3044 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; 3045 IOCB_t *cmd, *rsp = NULL; 3046 struct lpfc_sli_ct_request *ctreq; 3047 struct lpfc_dmabuf *txbmp; 3048 struct ulp_bde64 *txbpl = NULL; 3049 struct lpfc_dmabufext *txbuffer = NULL; 3050 struct list_head head; 3051 struct lpfc_dmabuf *curr; 3052 uint16_t txxri = 0, rxxri; 3053 uint32_t num_bde; 3054 uint8_t *ptr = NULL, *rx_databuf = NULL; 3055 int rc = 0; 3056 int time_left; 3057 int iocb_stat = IOCB_SUCCESS; 3058 unsigned long flags; 3059 void *dataout = NULL; 3060 uint32_t total_mem; 3061 3062 /* in case no data is returned return just the return code */ 3063 bsg_reply->reply_payload_rcv_len = 0; 3064 3065 if (job->request_len < 3066 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { 3067 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3068 "2739 Received DIAG TEST request below minimum " 3069 "size\n"); 3070 rc = -EINVAL; 3071 goto loopback_test_exit; 3072 } 3073 3074 if (job->request_payload.payload_len != 3075 job->reply_payload.payload_len) { 3076 rc = -EINVAL; 3077 goto loopback_test_exit; 3078 } 3079 3080 if ((phba->link_state == LPFC_HBA_ERROR) || 3081 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 3082 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 3083 rc = -EACCES; 3084 goto loopback_test_exit; 3085 } 3086 3087 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { 3088 rc = -EACCES; 3089 goto loopback_test_exit; 3090 } 3091 3092 size = job->request_payload.payload_len; 3093 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ 3094 3095 if ((size == 0) || (size > 80 * BUF_SZ_4K)) { 3096 rc = -ERANGE; 3097 goto loopback_test_exit; 3098 } 3099 3100 if (full_size >= BUF_SZ_4K) { 3101 /* 3102 * Allocate memory for ioctl data. If buffer is bigger than 64k, 3103 * then we allocate 64k and re-use that buffer over and over to 3104 * xfer the whole block.
This is because Linux kernel has a 3105 * problem allocating more than 120k of kernel space memory. Saw 3106 * problem with GET_FCPTARGETMAPPING... 3107 */ 3108 if (size <= (64 * 1024)) 3109 total_mem = full_size; 3110 else 3111 total_mem = 64 * 1024; 3112 } else 3113 /* Allocate memory for ioctl data */ 3114 total_mem = BUF_SZ_4K; 3115 3116 dataout = kmalloc(total_mem, GFP_KERNEL); 3117 if (dataout == NULL) { 3118 rc = -ENOMEM; 3119 goto loopback_test_exit; 3120 } 3121 3122 ptr = dataout; 3123 ptr += ELX_LOOPBACK_HEADER_SZ; 3124 sg_copy_to_buffer(job->request_payload.sg_list, 3125 job->request_payload.sg_cnt, 3126 ptr, size); 3127 rc = lpfcdiag_loop_self_reg(phba, &rpi); 3128 if (rc) 3129 goto loopback_test_exit; 3130 3131 if (phba->sli_rev < LPFC_SLI_REV4) { 3132 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 3133 if (rc) { 3134 lpfcdiag_loop_self_unreg(phba, rpi); 3135 goto loopback_test_exit; 3136 } 3137 3138 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 3139 if (rc) { 3140 lpfcdiag_loop_self_unreg(phba, rpi); 3141 goto loopback_test_exit; 3142 } 3143 } 3144 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 3145 SLI_CT_ELX_LOOPBACK); 3146 if (!evt) { 3147 lpfcdiag_loop_self_unreg(phba, rpi); 3148 rc = -ENOMEM; 3149 goto loopback_test_exit; 3150 } 3151 3152 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3153 list_add(&evt->node, &phba->ct_ev_waiters); 3154 lpfc_bsg_event_ref(evt); 3155 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3156 3157 cmdiocbq = lpfc_sli_get_iocbq(phba); 3158 if (phba->sli_rev < LPFC_SLI_REV4) 3159 rspiocbq = lpfc_sli_get_iocbq(phba); 3160 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3161 3162 if (txbmp) { 3163 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 3164 if (txbmp->virt) { 3165 INIT_LIST_HEAD(&txbmp->list); 3166 txbpl = (struct ulp_bde64 *) txbmp->virt; 3167 txbuffer = diag_cmd_data_alloc(phba, 3168 txbpl, full_size, 0); 3169 } 3170 } 3171 3172 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 3173 rc = -ENOMEM; 3174 goto err_loopback_test_exit; 3175 } 3176 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 3177 rc = -ENOMEM; 3178 goto err_loopback_test_exit; 3179 } 3180 3181 cmd = &cmdiocbq->iocb; 3182 if (phba->sli_rev < LPFC_SLI_REV4) 3183 rsp = &rspiocbq->iocb; 3184 3185 INIT_LIST_HEAD(&head); 3186 list_add_tail(&head, &txbuffer->dma.list); 3187 list_for_each_entry(curr, &head, list) { 3188 segment_len = ((struct lpfc_dmabufext *)curr)->size; 3189 if (current_offset == 0) { 3190 ctreq = curr->virt; 3191 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 3192 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3193 ctreq->RevisionId.bits.InId = 0; 3194 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 3195 ctreq->FsSubType = 0; 3196 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 3197 ctreq->CommandResponse.bits.Size = size; 3198 segment_offset = ELX_LOOPBACK_HEADER_SZ; 3199 } else 3200 segment_offset = 0; 3201 3202 BUG_ON(segment_offset >= segment_len); 3203 memcpy(curr->virt + segment_offset, 3204 ptr + current_offset, 3205 segment_len - segment_offset); 3206 3207 current_offset += segment_len - segment_offset; 3208 BUG_ON(current_offset > size); 3209 } 3210 list_del(&head); 3211 3212 /* Build the XMIT_SEQUENCE iocb */ 3213 num_bde = (uint32_t)txbuffer->flag; 3214 3215 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 3216 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 3217 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 3218 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct 
ulp_bde64)); 3219 3220 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 3221 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 3222 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 3223 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 3224 3225 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 3226 cmd->ulpBdeCount = 1; 3227 cmd->ulpLe = 1; 3228 cmd->ulpClass = CLASS3; 3229 3230 if (phba->sli_rev < LPFC_SLI_REV4) { 3231 cmd->ulpContext = txxri; 3232 } else { 3233 cmd->un.xseq64.bdl.ulpIoTag32 = 0; 3234 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi]; 3235 cmdiocbq->context3 = txbmp; 3236 cmdiocbq->sli4_xritag = NO_XRI; 3237 cmd->unsli3.rcvsli3.ox_id = 0xffff; 3238 } 3239 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3240 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK; 3241 cmdiocbq->vport = phba->pport; 3242 cmdiocbq->iocb_cmpl = NULL; 3243 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3244 rspiocbq, (phba->fc_ratov * 2) + 3245 LPFC_DRVR_TIMEOUT); 3246 3247 if ((iocb_stat != IOCB_SUCCESS) || 3248 ((phba->sli_rev < LPFC_SLI_REV4) && 3249 (rsp->ulpStatus != IOSTAT_SUCCESS))) { 3250 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3251 "3126 Failed loopback test issue iocb: " 3252 "iocb_stat:x%x\n", iocb_stat); 3253 rc = -EIO; 3254 goto err_loopback_test_exit; 3255 } 3256 3257 evt->waiting = 1; 3258 time_left = wait_event_interruptible_timeout( 3259 evt->wq, !list_empty(&evt->events_to_see), 3260 msecs_to_jiffies(1000 * 3261 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 3262 evt->waiting = 0; 3263 if (list_empty(&evt->events_to_see)) { 3264 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3265 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3266 "3125 Not receiving unsolicited event, " 3267 "rc:x%x\n", rc); 3268 } else { 3269 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3270 list_move(evt->events_to_see.prev, &evt->events_to_get); 3271 evdat = list_entry(evt->events_to_get.prev, 3272 typeof(*evdat), node); 3273 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3274 rx_databuf = evdat->data; 3275 if (evdat->len != full_size) { 3276 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3277 "1603 Loopback test did not receive expected " 3278 "data length. 
actual length 0x%x expected " 3279 "length 0x%x\n", 3280 evdat->len, full_size); 3281 rc = -EIO; 3282 } else if (rx_databuf == NULL) 3283 rc = -EIO; 3284 else { 3285 rc = IOCB_SUCCESS; 3286 /* skip over elx loopback header */ 3287 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3288 bsg_reply->reply_payload_rcv_len = 3289 sg_copy_from_buffer(job->reply_payload.sg_list, 3290 job->reply_payload.sg_cnt, 3291 rx_databuf, size); 3292 bsg_reply->reply_payload_rcv_len = size; 3293 } 3294 } 3295 3296 err_loopback_test_exit: 3297 lpfcdiag_loop_self_unreg(phba, rpi); 3298 3299 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3300 lpfc_bsg_event_unref(evt); /* release ref */ 3301 lpfc_bsg_event_unref(evt); /* delete */ 3302 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3303 3304 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) 3305 lpfc_sli_release_iocbq(phba, cmdiocbq); 3306 3307 if (rspiocbq != NULL) 3308 lpfc_sli_release_iocbq(phba, rspiocbq); 3309 3310 if (txbmp != NULL) { 3311 if (txbpl != NULL) { 3312 if (txbuffer != NULL) 3313 diag_cmd_data_free(phba, txbuffer); 3314 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 3315 } 3316 kfree(txbmp); 3317 } 3318 3319 loopback_test_exit: 3320 kfree(dataout); 3321 /* make error code available to userspace */ 3322 bsg_reply->result = rc; 3323 job->dd_data = NULL; 3324 /* complete the job back to userspace if no error */ 3325 if (rc == IOCB_SUCCESS) 3326 bsg_job_done(job, bsg_reply->result, 3327 bsg_reply->reply_payload_rcv_len); 3328 return rc; 3329 } 3330 3331 /** 3332 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 3333 * @job: GET_DFC_REV fc_bsg_job 3334 **/ 3335 static int 3336 lpfc_bsg_get_dfc_rev(struct bsg_job *job) 3337 { 3338 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 3339 struct fc_bsg_reply *bsg_reply = job->reply; 3340 struct lpfc_hba *phba = vport->phba; 3341 struct get_mgmt_rev_reply *event_reply; 3342 int rc = 0; 3343 3344 if (job->request_len < 3345 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 3346 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3347 "2740 Received GET_DFC_REV request below " 3348 "minimum size\n"); 3349 rc = -EINVAL; 3350 goto job_error; 3351 } 3352 3353 event_reply = (struct get_mgmt_rev_reply *) 3354 bsg_reply->reply_data.vendor_reply.vendor_rsp; 3355 3356 if (job->reply_len < 3357 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 3358 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3359 "2741 Received GET_DFC_REV reply below " 3360 "minimum size\n"); 3361 rc = -EINVAL; 3362 goto job_error; 3363 } 3364 3365 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3366 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3367 job_error: 3368 bsg_reply->result = rc; 3369 if (rc == 0) 3370 bsg_job_done(job, bsg_reply->result, 3371 bsg_reply->reply_payload_rcv_len); 3372 return rc; 3373 } 3374 3375 /** 3376 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler 3377 * @phba: Pointer to HBA context object. 3378 * @pmboxq: Pointer to mailbox command. 3379 * 3380 * This is the completion handler for mailbox commands issued by the 3381 * lpfc_bsg_issue_mbox function. This function is called by the 3382 * mailbox event handler function with no lock held. It retrieves the 3383 * bsg job tracking structure from context1 of the mailbox, copies the 3384 * mailbox response back to the job and completes it if still active.
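* Completion flow, as coded below: the MAILBOX_t image is copied back to the job's buffer, the job's dd_data link is severed under phba->ct_ev_lock so the timeout handler cannot abort a finished job, the mailbox and tracking structures are freed, and bsg_job_done() runs only when the job is still active.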
3385 **/ 3386 static void 3387 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3388 { 3389 struct bsg_job_data *dd_data; 3390 struct fc_bsg_reply *bsg_reply; 3391 struct bsg_job *job; 3392 uint32_t size; 3393 unsigned long flags; 3394 uint8_t *pmb, *pmb_buf; 3395 3396 dd_data = pmboxq->context1; 3397 3398 /* 3399 * The outgoing buffer is readily referred from the dma buffer, 3400 * just need to get header part from mailboxq structure. 3401 */ 3402 pmb = (uint8_t *)&pmboxq->u.mb; 3403 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3404 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3405 3406 /* Determine if job has been aborted */ 3407 3408 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3409 job = dd_data->set_job; 3410 if (job) { 3411 /* Prevent timeout handling from trying to abort job */ 3412 job->dd_data = NULL; 3413 } 3414 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3415 3416 /* Copy the mailbox data to the job if it is still active */ 3417 3418 if (job) { 3419 bsg_reply = job->reply; 3420 size = job->reply_payload.payload_len; 3421 bsg_reply->reply_payload_rcv_len = 3422 sg_copy_from_buffer(job->reply_payload.sg_list, 3423 job->reply_payload.sg_cnt, 3424 pmb_buf, size); 3425 } 3426 3427 dd_data->set_job = NULL; 3428 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3429 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3430 kfree(dd_data); 3431 3432 /* Complete the job if the job is still active */ 3433 3434 if (job) { 3435 bsg_reply->result = 0; 3436 bsg_job_done(job, bsg_reply->result, 3437 bsg_reply->reply_payload_rcv_len); 3438 } 3439 return; 3440 } 3441 3442 /** 3443 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3444 * @phba: Pointer to HBA context object. 3445 * @mb: Pointer to a mailbox object. 3446 * @vport: Pointer to a vport object. 3447 * 3448 * Some commands require the port to be offline, some may not be called from 3449 * the application. 
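* Return: 0 when the mailbox command is permitted; -EPERM when it is rejected, i.e. an unknown command or an offline-only command issued while the port is online.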
3450 **/ 3451 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, 3452 MAILBOX_t *mb, struct lpfc_vport *vport) 3453 { 3454 /* return negative error values for bsg job */ 3455 switch (mb->mbxCommand) { 3456 /* Offline only */ 3457 case MBX_INIT_LINK: 3458 case MBX_DOWN_LINK: 3459 case MBX_CONFIG_LINK: 3460 case MBX_CONFIG_RING: 3461 case MBX_RESET_RING: 3462 case MBX_UNREG_LOGIN: 3463 case MBX_CLEAR_LA: 3464 case MBX_DUMP_CONTEXT: 3465 case MBX_RUN_DIAGS: 3466 case MBX_RESTART: 3467 case MBX_SET_MASK: 3468 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3469 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3470 "2743 Command 0x%x is illegal in on-line " 3471 "state\n", 3472 mb->mbxCommand); 3473 return -EPERM; 3474 } /* fall through */ 3475 case MBX_WRITE_NV: 3476 case MBX_WRITE_VPARMS: 3477 case MBX_LOAD_SM: 3478 case MBX_READ_NV: 3479 case MBX_READ_CONFIG: 3480 case MBX_READ_RCONFIG: 3481 case MBX_READ_STATUS: 3482 case MBX_READ_XRI: 3483 case MBX_READ_REV: 3484 case MBX_READ_LNK_STAT: 3485 case MBX_DUMP_MEMORY: 3486 case MBX_DOWN_LOAD: 3487 case MBX_UPDATE_CFG: 3488 case MBX_KILL_BOARD: 3489 case MBX_READ_TOPOLOGY: 3490 case MBX_LOAD_AREA: 3491 case MBX_LOAD_EXP_ROM: 3492 case MBX_BEACON: 3493 case MBX_DEL_LD_ENTRY: 3494 case MBX_SET_DEBUG: 3495 case MBX_WRITE_WWN: 3496 case MBX_SLI4_CONFIG: 3497 case MBX_READ_EVENT_LOG: 3498 case MBX_READ_EVENT_LOG_STATUS: 3499 case MBX_WRITE_EVENT_LOG: 3500 case MBX_PORT_CAPABILITIES: 3501 case MBX_PORT_IOV_CONTROL: 3502 case MBX_RUN_BIU_DIAG64: 3503 break; 3504 case MBX_SET_VARIABLE: 3505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3506 "1226 mbox: set_variable 0x%x, 0x%x\n", 3507 mb->un.varWords[0], 3508 mb->un.varWords[1]); 3509 if ((mb->un.varWords[0] == SETVAR_MLOMNT) 3510 && (mb->un.varWords[1] == 1)) { 3511 phba->wait_4_mlo_maint_flg = 1; 3512 } else if (mb->un.varWords[0] == SETVAR_MLORST) { 3513 spin_lock_irq(&phba->hbalock); 3514 phba->link_flag &= ~LS_LOOPBACK_MODE; 3515 spin_unlock_irq(&phba->hbalock); 3516 phba->fc_topology = LPFC_TOPOLOGY_PT_PT; 3517 } 3518 break; 3519 case MBX_READ_SPARM64: 3520 case MBX_REG_LOGIN: 3521 case MBX_REG_LOGIN64: 3522 case MBX_CONFIG_PORT: 3523 case MBX_RUN_BIU_DIAG: 3524 default: 3525 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3526 "2742 Unknown Command 0x%x\n", 3527 mb->mbxCommand); 3528 return -EPERM; 3529 } 3530 3531 return 0; /* ok */ 3532 } 3533 3534 /** 3535 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session 3536 * @phba: Pointer to HBA context object. 3537 * 3538 * This routine cleans up and resets the BSG handling of a multi-buffer mbox 3539 * command session. 3540 **/ 3541 static void 3542 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) 3543 { 3544 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) 3545 return; 3546 3547 /* free all memory, including dma buffers */ 3548 lpfc_bsg_dma_page_list_free(phba, 3549 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3550 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf); 3551 /* multi-buffer write mailbox command pass-through complete */ 3552 memset((char *)&phba->mbox_ext_buf_ctx, 0, 3553 sizeof(struct lpfc_mbox_ext_buf_ctx)); 3554 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3555 3556 return; 3557 } 3558 3559 /** 3560 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl 3561 * @phba: Pointer to HBA context object. 3562 * @pmboxq: Pointer to mailbox command. 3563 * 3564 * This routine handles the BSG job for mailbox command completions with 3565 * multiple external buffers.
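* Return: the bsg_job pointer when the job is still active (the caller is then expected to complete it), or NULL when the job has already been aborted.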
3566 **/ 3567 static struct bsg_job * 3568 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3569 { 3570 struct bsg_job_data *dd_data; 3571 struct bsg_job *job; 3572 struct fc_bsg_reply *bsg_reply; 3573 uint8_t *pmb, *pmb_buf; 3574 unsigned long flags; 3575 uint32_t size; 3576 int rc = 0; 3577 struct lpfc_dmabuf *dmabuf; 3578 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3579 uint8_t *pmbx; 3580 3581 dd_data = pmboxq->context1; 3582 3583 /* Determine if job has been aborted */ 3584 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3585 job = dd_data->set_job; 3586 if (job) { 3587 bsg_reply = job->reply; 3588 /* Prevent timeout handling from trying to abort job */ 3589 job->dd_data = NULL; 3590 } 3591 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3592 3593 /* 3594 * The outgoing buffer is readily referred from the dma buffer, 3595 * just need to get header part from mailboxq structure. 3596 */ 3597 3598 pmb = (uint8_t *)&pmboxq->u.mb; 3599 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3600 /* Copy the byte swapped response mailbox back to the user */ 3601 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3602 /* if there is any non-embedded extended data copy that too */ 3603 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; 3604 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3605 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3606 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3607 pmbx = (uint8_t *)dmabuf->virt; 3608 /* byte swap the extended data following the mailbox command */ 3609 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3610 &pmbx[sizeof(MAILBOX_t)], 3611 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3612 } 3613 3614 /* Complete the job if the job is still active */ 3615 3616 if (job) { 3617 size = job->reply_payload.payload_len; 3618 bsg_reply->reply_payload_rcv_len = 3619 sg_copy_from_buffer(job->reply_payload.sg_list, 3620 job->reply_payload.sg_cnt, 3621 pmb_buf, size); 3622 3623 /* result for successful */ 3624 bsg_reply->result = 0; 3625 3626 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3627 "2937 SLI_CONFIG ext-buffer mailbox command " 3628 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3629 phba->mbox_ext_buf_ctx.nembType, 3630 phba->mbox_ext_buf_ctx.mboxType, size); 3631 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, 3632 phba->mbox_ext_buf_ctx.nembType, 3633 phba->mbox_ext_buf_ctx.mboxType, 3634 dma_ebuf, sta_pos_addr, 3635 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3636 } else { 3637 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3638 "2938 SLI_CONFIG ext-buffer mailbox " 3639 "command (x%x/x%x) failure, rc:x%x\n", 3640 phba->mbox_ext_buf_ctx.nembType, 3641 phba->mbox_ext_buf_ctx.mboxType, rc); 3642 } 3643 3644 3645 /* state change */ 3646 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3647 kfree(dd_data); 3648 return job; 3649 } 3650 3651 /** 3652 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox 3653 * @phba: Pointer to HBA context object. 3654 * @pmboxq: Pointer to mailbox command. 3655 * 3656 * This is the completion handler for mailbox read commands with multiple 3657 * external buffers.
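* When the command failed or used a single buffer, the multi-buffer session context is torn down via lpfc_bsg_mbox_ext_session_reset(); otherwise the session stays open for the remaining buffers.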
3658 **/ 3659 static void 3660 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3661 { 3662 struct bsg_job *job; 3663 struct fc_bsg_reply *bsg_reply; 3664 3665 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3666 3667 /* handle the BSG job with mailbox command */ 3668 if (!job) 3669 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3670 3671 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3672 "2939 SLI_CONFIG ext-buffer rd mailbox command " 3673 "complete, ctxState:x%x, mbxStatus:x%x\n", 3674 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3675 3676 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3677 lpfc_bsg_mbox_ext_session_reset(phba); 3678 3679 /* free base driver mailbox structure memory */ 3680 mempool_free(pmboxq, phba->mbox_mem_pool); 3681 3682 /* if the job is still active, call job done */ 3683 if (job) { 3684 bsg_reply = job->reply; 3685 bsg_job_done(job, bsg_reply->result, 3686 bsg_reply->reply_payload_rcv_len); 3687 } 3688 return; 3689 } 3690 3691 /** 3692 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox 3693 * @phba: Pointer to HBA context object. 3694 * @pmboxq: Pointer to mailbox command. 3695 * 3696 * This is the completion handler for mailbox write commands with multiple 3697 * external buffers. 3698 **/ 3699 static void 3700 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3701 { 3702 struct bsg_job *job; 3703 struct fc_bsg_reply *bsg_reply; 3704 3705 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3706 3707 /* handle the BSG job with the mailbox command */ 3708 if (!job) 3709 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3710 3711 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3712 "2940 SLI_CONFIG ext-buffer wr mailbox command " 3713 "complete, ctxState:x%x, mbxStatus:x%x\n", 3714 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3715 3716 /* free all memory, including dma buffers */ 3717 mempool_free(pmboxq, phba->mbox_mem_pool); 3718 lpfc_bsg_mbox_ext_session_reset(phba); 3719 3720 /* if the job is still active, call job done */ 3721 if (job) { 3722 bsg_reply = job->reply; 3723 bsg_job_done(job, bsg_reply->result, 3724 bsg_reply->reply_payload_rcv_len); 3725 } 3726 3727 return; 3728 } 3729 3730 static void 3731 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, 3732 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, 3733 struct lpfc_dmabuf *ext_dmabuf) 3734 { 3735 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3736 3737 /* pointer to the start of mailbox command */ 3738 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; 3739 3740 if (nemb_tp == nemb_mse) { 3741 if (index == 0) { 3742 sli_cfg_mbx->un.sli_config_emb0_subsys. 3743 mse[index].pa_hi = 3744 putPaddrHigh(mbx_dmabuf->phys + 3745 sizeof(MAILBOX_t)); 3746 sli_cfg_mbx->un.sli_config_emb0_subsys. 3747 mse[index].pa_lo = 3748 putPaddrLow(mbx_dmabuf->phys + 3749 sizeof(MAILBOX_t)); 3750 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3751 "2943 SLI_CONFIG(mse)[%d], " 3752 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3753 index, 3754 sli_cfg_mbx->un.sli_config_emb0_subsys. 3755 mse[index].buf_len, 3756 sli_cfg_mbx->un.sli_config_emb0_subsys. 3757 mse[index].pa_hi, 3758 sli_cfg_mbx->un.sli_config_emb0_subsys. 3759 mse[index].pa_lo); 3760 } else { 3761 sli_cfg_mbx->un.sli_config_emb0_subsys. 3762 mse[index].pa_hi = 3763 putPaddrHigh(ext_dmabuf->phys); 3764 sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);

		} else {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}

/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
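 *
 * Returns SLI_CONFIG_HANDLED when the command has been successfully issued
 * to the port, or a negative error code on failure.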
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject a non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
	    (nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}

/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
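 *
 * Returns SLI_CONFIG_HANDLED when the command, or the first of its external
 * write buffers, has been accepted, or a negative error code on failure.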
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log the lengths of the additional external buffers to come */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */

		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
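 *
 * Returns SLI_CONFIG_HANDLED when the command was dispatched as a
 * multi-buffer read or write, SLI_CONFIG_NOT_HANDLED when the
 * subsystem/opcode pair is left to the regular mailbox path, or a negative
 * error code on rejection or failure.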
 **/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on new multi-buffer pass-through mailbox command */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
			case FCOE_OPCODE_GET_DPORT_RESULTS:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
			case FCOE_OPCODE_SET_DPORT_MODE:
			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
			case COMN_OPCODE_GET_PROFILE_CONFIG:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}

/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine is for requesting to abort a pass-through mailbox command with
 * multiple external buffers due to error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
	    phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}

/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
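 * @job: Pointer to the BSG job object.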
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	bsg_reply->result = 0;
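	/* not the last external buffer; complete this request so the
	 * application can send the next one in the sequence
	 */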
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}
	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
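 *
 * Returns SLI_CONFIG_NOT_HANDLED for a command without external buffers,
 * SLI_CONFIG_HANDLED when an external-buffer step was processed, or -EPIPE
 * when the buffer does not match the tracked session (broken pipe).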
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
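 *
 * Returns zero when the job completed inline, 1 when the mailbox command was
 * successfully submitted for asynchronous completion, or a negative error
 * code on failure.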
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
		    struct lpfc_vport *vport)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* sanity check to protect driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow mailbox commands to be sent when blocked or when in
	 * the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
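	 * (The command is still issued in that case; the driver only logs
	 * this warning for other commands while in the stopped state.)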
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8]
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* a non-zero receive length is required */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						     + sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
						putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
						putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup the mailbox completion handler for the BSG job */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* setup context pointer to hand our tracking structure to
	 * the completion handler
	 */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting; mailbox already completed */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}

/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
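 *
 * Returns zero when the mailbox command completed inline or was successfully
 * submitted; otherwise a negative error code, which is also made available
 * to the application in bsg_reply->result.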
 **/
static int
lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	bsg_reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				bsg_request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		bsg_reply->result = 0;
		job->dd_data = NULL;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		bsg_reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the response payload back to the BSG job, releases
 * the iocb and DMA resources, and completes the job if it is still active.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
5049 */ 5050 5051 menlo_resp = (struct menlo_response *) 5052 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5053 menlo_resp->xri = rsp->ulpContext; 5054 if (rsp->ulpStatus) { 5055 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 5056 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 5057 case IOERR_SEQUENCE_TIMEOUT: 5058 rc = -ETIMEDOUT; 5059 break; 5060 case IOERR_INVALID_RPI: 5061 rc = -EFAULT; 5062 break; 5063 default: 5064 rc = -EACCES; 5065 break; 5066 } 5067 } else { 5068 rc = -EACCES; 5069 } 5070 } else { 5071 rsp_size = rsp->un.genreq64.bdl.bdeSize; 5072 bsg_reply->reply_payload_rcv_len = 5073 lpfc_bsg_copy_data(rmp, &job->reply_payload, 5074 rsp_size, 0); 5075 } 5076 5077 } 5078 5079 lpfc_sli_release_iocbq(phba, cmdiocbq); 5080 lpfc_free_bsg_buffers(phba, cmp); 5081 lpfc_free_bsg_buffers(phba, rmp); 5082 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 5083 kfree(bmp); 5084 kfree(dd_data); 5085 5086 /* Complete the job if active */ 5087 5088 if (job) { 5089 bsg_reply->result = rc; 5090 bsg_job_done(job, bsg_reply->result, 5091 bsg_reply->reply_payload_rcv_len); 5092 } 5093 5094 return; 5095 } 5096 5097 /** 5098 * lpfc_menlo_cmd - send an ioctl for menlo hardware 5099 * @job: fc_bsg_job to handle 5100 * 5101 * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 5102 * all the command completions will return the xri for the command. 5103 * For menlo data requests a gen request 64 CX is used to continue the exchange 5104 * supplied in the menlo request header xri field. 5105 **/ 5106 static int 5107 lpfc_menlo_cmd(struct bsg_job *job) 5108 { 5109 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); 5110 struct fc_bsg_request *bsg_request = job->request; 5111 struct fc_bsg_reply *bsg_reply = job->reply; 5112 struct lpfc_hba *phba = vport->phba; 5113 struct lpfc_iocbq *cmdiocbq; 5114 IOCB_t *cmd; 5115 int rc = 0; 5116 struct menlo_command *menlo_cmd; 5117 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; 5118 int request_nseg; 5119 int reply_nseg; 5120 struct bsg_job_data *dd_data; 5121 struct ulp_bde64 *bpl = NULL; 5122 5123 /* in case no data is returned return just the return code */ 5124 bsg_reply->reply_payload_rcv_len = 0; 5125 5126 if (job->request_len < 5127 sizeof(struct fc_bsg_request) + 5128 sizeof(struct menlo_command)) { 5129 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5130 "2784 Received MENLO_CMD request below " 5131 "minimum size\n"); 5132 rc = -ERANGE; 5133 goto no_dd_data; 5134 } 5135 5136 if (job->reply_len < 5137 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 5138 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5139 "2785 Received MENLO_CMD reply below " 5140 "minimum size\n"); 5141 rc = -ERANGE; 5142 goto no_dd_data; 5143 } 5144 5145 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 5146 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5147 "2786 Adapter does not support menlo " 5148 "commands\n"); 5149 rc = -EPERM; 5150 goto no_dd_data; 5151 } 5152 5153 menlo_cmd = (struct menlo_command *) 5154 bsg_request->rqst_data.h_vendor.vendor_cmd; 5155 5156 /* allocate our bsg tracking structure */ 5157 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 5158 if (!dd_data) { 5159 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 5160 "2787 Failed allocation of dd_data\n"); 5161 rc = -ENOMEM; 5162 goto no_dd_data; 5163 } 5164 5165 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5166 if (!bmp) { 5167 rc = -ENOMEM; 5168 goto free_dd; 5169 } 5170 5171 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 5172 if 
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* LE bit: last element of the BPL */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

static int
lpfc_forced_link_speed(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct forced_link_speed_support_reply *forced_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct get_forced_link_speed_support)) {
		lpfc_printf_log(phba,
				KERN_WARNING, LOG_LIBDFC,
				"0048 Received FORCED_LINK_SPEED request "
				"below minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply = (struct forced_link_speed_support_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct forced_link_speed_support_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0049 Received FORCED_LINK_SPEED reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
				  ? LPFC_FORCED_LINK_SPEED_SUPPORTED
				  : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
		rc = lpfc_forced_link_speed(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t msgcode;
	int rc;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the
 * job's IOCB. The aborted IOCB will return to the waiting function which
 * will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* if job's driver data is NULL, the command completed or is in the
	 * process of completing. In this case, return status to the request
	 * so the timeout is retried. This avoids double completion issues
	 * and the request will be pulled off the timer queue when the
	 * command's completion handler executes. Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */

		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
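		 * Unlike the TYPE_IOCB case above, no LPFC_IO_CMD_OUTSTANDING
		 * abort-window check is made before aborting a menlo command.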
5494 */ 5495 cmdiocb = dd_data->context_un.menlo.cmdiocbq; 5496 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5497 5498 spin_lock_irqsave(&phba->hbalock, flags); 5499 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5500 list) { 5501 if (check_iocb == cmdiocb) { 5502 list_move_tail(&check_iocb->list, &completions); 5503 break; 5504 } 5505 } 5506 if (list_empty(&completions)) 5507 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5508 spin_unlock_irqrestore(&phba->hbalock, flags); 5509 if (!list_empty(&completions)) { 5510 lpfc_sli_cancel_iocbs(phba, &completions, 5511 IOSTAT_LOCAL_REJECT, 5512 IOERR_SLI_ABORTED); 5513 } 5514 break; 5515 default: 5516 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5517 break; 5518 } 5519 5520 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 5521 * otherwise an error message will be displayed on the console 5522 * so always return success (zero) 5523 */ 5524 return rc; 5525 } 5526