/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};
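
/*
 * Completion and timeout paths recover their per-request state from
 * iocb context1, which points at one of these bsg_job_data trackers.
 * The type field tags which context_un member is live. A minimal
 * dispatch sketch (illustrative only, not driver code):
 *
 *	struct bsg_job_data *dd_data = cmdiocbq->context1;
 *
 *	switch (dd_data->type) {
 *	case TYPE_EVT:
 *		evt = dd_data->context_un.evt;
 *		break;
 *	case TYPE_IOCB:
 *		cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
 *		break;
 *	case TYPE_MBOX:
 *	case TYPE_MENLO:
 *		...
 *	}
 */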

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{

	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
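
/*
 * Typical usage of the two helpers above, as a sketch (the names
 * bmp/cmp/bpl/nseg are placeholders): the caller carves a BPL out of
 * an mbuf page, fills it while allocating the payload chain, copies
 * the bsg payload in, and frees the chain on teardown.
 *
 *	bpl = (struct ulp_bde64 *)bmp->virt;
 *	nseg = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
 *	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
 *				     1, bpl, &nseg);
 *	if (cmp)
 *		lpfc_bsg_copy_data(cmp, &job->request_payload,
 *				   job->request_payload.payload_len, 1);
 *	...
 *	lpfc_free_bsg_buffers(phba, cmp);
 *
 * lpfc_bsg_send_mgmt_cmd() below follows this pattern for both the
 * command and the reply payloads.
 */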

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
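
/*
 * Note on the abort window used above: LPFC_IO_CMD_OUTSTANDING is set
 * under hbalock only after lpfc_sli_issue_iocb() succeeds and only if
 * the completion has not already run (LPFC_IO_LIBDFC still set); the
 * completion handler clears it under the same lock. The bsg timeout
 * handler may attempt an abort only while the flag is set, which closes
 * the race between a late completion and a timeout-driven abort. The
 * same handshake is repeated in the other issue paths in this file.
 */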

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the allocated dma buffers from the prep els iocb for
	 * the command and the response, so that if the job times out and the
	 * request is freed, we won't DMA into memory that is no longer
	 * allocated for the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
			 (phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
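
/*
 * The unsolicited CT path below matches received frames against the
 * waiters registered on phba->ct_ev_waiters. Each waiter is an
 * lpfc_bsg_event whose lifetime is kref-managed: lpfc_bsg_event_ref()
 * pins the waiter while an event_data entry is built for it, and the
 * matching lpfc_bsg_event_unref() may free it via lpfc_bsg_event_free().
 */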

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited command iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
						     LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 for handled; otherwise, it returns 0,
 * indicating that no context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}
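
/*
 * SET_EVENT and GET_EVENT cooperate: SET_EVENT registers (or re-arms)
 * a waiter keyed by ev_reg_id and parks the bsg job until an
 * unsolicited CT event arrives, while GET_EVENT drains one queued
 * event_data entry per call. A sketch of the vendor command layout as
 * the handlers below see it (field names assumed from struct
 * set_ct_event in lpfc_bsg.h; error handling omitted):
 *
 *	struct fc_bsg_request *bsg_request = job->request;
 *	struct set_ct_event *event_req = (struct set_ct_event *)
 *		bsg_request->rqst_data.h_vendor.vendor_cmd;
 *
 *	event_req->type_mask;	// e.g. FC_REG_CT_EVENT
 *	event_req->ev_req_id;	// matched against the CT FsType
 *	event_req->ev_reg_id;	// identifies this waiter on later calls
 */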

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
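
/*
 * lpfc_issue_ct_rsp() below consumes one entry of the port's ct_ctx[]
 * exchange array: the tag supplied by user space is the index that
 * lpfc_bsg_ct_unsol_event() published in evt_dat->immed_dat for SLI4,
 * and the entry is flipped to UNSOL_INVALID once the response has been
 * built, so each unsolicited exchange can be answered at most once.
 */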

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a dma buffer descriptor list for the command payload.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
			icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
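
/*
 * The diag loopback handlers that follow bracket their work with
 * lpfc_bsg_diag_mode_enter()/lpfc_bsg_diag_mode_exit(): enter blocks
 * new SCSI requests on every vport and waits for the FCP txcmplq to
 * drain, exit unblocks the hosts again. Every path out of a loopback
 * handler after a successful enter must reach the exit call, which is
 * why the sli3 handler funnels through its loopback_mode_exit label.
 */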

/**
 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for driver exit processing of setting up
 * diag loopback mode on device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				      sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
1911 * 1912 * This function is responsible for issuing a sli4 mailbox command for setting 1913 * link to either diag state or normal operation state. 1914 */ 1915 static int 1916 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) 1917 { 1918 LPFC_MBOXQ_t *pmboxq; 1919 struct lpfc_mbx_set_link_diag_state *link_diag_state; 1920 uint32_t req_len, alloc_len; 1921 int mbxstatus = MBX_SUCCESS, rc; 1922 1923 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1924 if (!pmboxq) 1925 return -ENOMEM; 1926 1927 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 1928 sizeof(struct lpfc_sli4_cfg_mhdr)); 1929 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1930 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 1931 req_len, LPFC_SLI4_MBX_EMBED); 1932 if (alloc_len != req_len) { 1933 rc = -ENOMEM; 1934 goto link_diag_state_set_out; 1935 } 1936 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1937 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", 1938 diag, phba->sli4_hba.lnk_info.lnk_tp, 1939 phba->sli4_hba.lnk_info.lnk_no); 1940 1941 link_diag_state = &pmboxq->u.mqe.un.link_diag_state; 1942 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, 1943 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); 1944 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, 1945 phba->sli4_hba.lnk_info.lnk_no); 1946 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, 1947 phba->sli4_hba.lnk_info.lnk_tp); 1948 if (diag) 1949 bf_set(lpfc_mbx_set_diag_state_diag, 1950 &link_diag_state->u.req, 1); 1951 else 1952 bf_set(lpfc_mbx_set_diag_state_diag, 1953 &link_diag_state->u.req, 0); 1954 1955 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1956 1957 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) 1958 rc = 0; 1959 else 1960 rc = -ENODEV; 1961 1962 link_diag_state_set_out: 1963 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 1964 mempool_free(pmboxq, phba->mbox_mem_pool); 1965 1966 return rc; 1967 } 1968 1969 /** 1970 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic 1971 * @phba: Pointer to HBA context object. 1972 * 1973 * This function is responsible for issuing a sli4 mailbox command for setting 1974 * up internal loopback diagnostic. 
1975 */ 1976 static int 1977 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) 1978 { 1979 LPFC_MBOXQ_t *pmboxq; 1980 uint32_t req_len, alloc_len; 1981 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1982 int mbxstatus = MBX_SUCCESS, rc = 0; 1983 1984 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1985 if (!pmboxq) 1986 return -ENOMEM; 1987 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - 1988 sizeof(struct lpfc_sli4_cfg_mhdr)); 1989 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 1990 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, 1991 req_len, LPFC_SLI4_MBX_EMBED); 1992 if (alloc_len != req_len) { 1993 mempool_free(pmboxq, phba->mbox_mem_pool); 1994 return -ENOMEM; 1995 } 1996 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; 1997 bf_set(lpfc_mbx_set_diag_state_link_num, 1998 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no); 1999 bf_set(lpfc_mbx_set_diag_state_link_type, 2000 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); 2001 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, 2002 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL); 2003 2004 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 2005 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { 2006 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2007 "3127 Failed setup loopback mode mailbox " 2008 "command, rc:x%x, status:x%x\n", mbxstatus, 2009 pmboxq->u.mb.mbxStatus); 2010 rc = -ENODEV; 2011 } 2012 if (pmboxq && (mbxstatus != MBX_TIMEOUT)) 2013 mempool_free(pmboxq, phba->mbox_mem_pool); 2014 return rc; 2015 } 2016 2017 /** 2018 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic 2019 * @phba: Pointer to HBA context object. 2020 * 2021 * This function set up SLI4 FC port registrations for diagnostic run, which 2022 * includes all the rpis, vfi, and also vpi. 2023 */ 2024 static int 2025 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) 2026 { 2027 int rc; 2028 2029 if (phba->pport->fc_flag & FC_VFI_REGISTERED) { 2030 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2031 "3136 Port still had vfi registered: " 2032 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", 2033 phba->pport->fc_myDID, phba->fcf.fcfi, 2034 phba->sli4_hba.vfi_ids[phba->pport->vfi], 2035 phba->vpi_ids[phba->pport->vpi]); 2036 return -EINVAL; 2037 } 2038 rc = lpfc_issue_reg_vfi(phba->pport); 2039 return rc; 2040 } 2041 2042 /** 2043 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command 2044 * @phba: Pointer to HBA context object. 2045 * @job: LPFC_BSG_VENDOR_DIAG_MODE 2046 * 2047 * This function is responsible for placing an sli4 port into diagnostic 2048 * loopback mode in order to perform a diagnostic loopback test. 
2049 */
2050 static int
2051 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2052 {
2053 	struct fc_bsg_request *bsg_request = job->request;
2054 	struct fc_bsg_reply *bsg_reply = job->reply;
2055 	struct diag_mode_set *loopback_mode;
2056 	uint32_t link_flags, timeout;
2057 	int i, rc = 0;
2058
2059 	/* no data to return just the return code */
2060 	bsg_reply->reply_payload_rcv_len = 0;
2061
2062 	if (job->request_len < sizeof(struct fc_bsg_request) +
2063 	    sizeof(struct diag_mode_set)) {
2064 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2065 				"3011 Received DIAG MODE request size:%d "
2066 				"below the minimum size:%d\n",
2067 				job->request_len,
2068 				(int)(sizeof(struct fc_bsg_request) +
2069 				      sizeof(struct diag_mode_set)));
2070 		rc = -EINVAL;
2071 		goto job_error;
2072 	}
2073
2074 	rc = lpfc_bsg_diag_mode_enter(phba);
2075 	if (rc)
2076 		goto job_error;
2077
2078 	/* indicate we are in loopback diagnostic mode */
2079 	spin_lock_irq(&phba->hbalock);
2080 	phba->link_flag |= LS_LOOPBACK_MODE;
2081 	spin_unlock_irq(&phba->hbalock);
2082
2083 	/* reset port to start from scratch */
2084 	rc = lpfc_selective_reset(phba);
2085 	if (rc)
2086 		goto job_error;
2087
2088 	/* bring the link to diagnostic mode */
2089 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2090 			"3129 Bring link to diagnostic state.\n");
2091 	loopback_mode = (struct diag_mode_set *)
2092 		bsg_request->rqst_data.h_vendor.vendor_cmd;
2093 	link_flags = loopback_mode->type;
2094 	timeout = loopback_mode->timeout * 100;
2095
2096 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2097 	if (rc) {
2098 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2099 				"3130 Failed to bring link to diagnostic "
2100 				"state, rc:x%x\n", rc);
2101 		goto loopback_mode_exit;
2102 	}
2103
2104 	/* wait for link down before proceeding */
2105 	i = 0;
2106 	while (phba->link_state != LPFC_LINK_DOWN) {
2107 		if (i++ > timeout) {
2108 			rc = -ETIMEDOUT;
2109 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2110 					"3131 Timeout waiting for link to "
2111 					"diagnostic mode, timeout:%d ms\n",
2112 					timeout * 10);
2113 			goto loopback_mode_exit;
2114 		}
2115 		msleep(10);
2116 	}
2117
2118 	/* set up loopback mode */
2119 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2120 			"3132 Set up loopback mode:x%x\n", link_flags);
2121
2122 	if (link_flags == INTERNAL_LOOP_BACK)
2123 		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
2124 	else if (link_flags == EXTERNAL_LOOP_BACK)
2125 		rc = lpfc_hba_init_link_fc_topology(phba,
2126 						    FLAGS_TOPOLOGY_MODE_PT_PT,
2127 						    MBX_NOWAIT);
2128 	else {
2129 		rc = -EINVAL;
2130 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2131 				"3141 Loopback mode:x%x not supported\n",
2132 				link_flags);
2133 		goto loopback_mode_exit;
2134 	}
2135
2136 	if (!rc) {
2137 		/* wait for the link attention interrupt */
2138 		msleep(100);
2139 		i = 0;
2140 		while (phba->link_state < LPFC_LINK_UP) {
2141 			if (i++ > timeout) {
2142 				rc = -ETIMEDOUT;
2143 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2144 					"3137 Timeout waiting for link up "
2145 					"in loopback mode, timeout:%d ms\n",
2146 					timeout * 10);
2147 				break;
2148 			}
2149 			msleep(10);
2150 		}
2151 	}
2152
2153 	/* port resource registration setup for loopback diagnostic */
2154 	if (!rc) {
2155 		/* set up a non-zero myDID for loopback test */
2156 		phba->pport->fc_myDID = 1;
2157 		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2158 	} else
2159 		goto loopback_mode_exit;
2160
2161 	if (!rc) {
2162 		/* wait for the port ready */
2163 		msleep(100);
2164 		i = 0;
2165 		while (phba->link_state != LPFC_HBA_READY) {
2166 			if (i++ > timeout) {
2167 				rc = -ETIMEDOUT;
2168 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2169 					"3133 Timeout waiting for port "
2170 					"loopback mode ready, timeout:%d ms\n",
2171 					timeout * 10);
2172 				break;
2173 			}
2174 			msleep(10);
2175 		}
2176 	}
2177
2178 loopback_mode_exit:
2179 	/* clear loopback diagnostic mode */
2180 	if (rc) {
2181 		spin_lock_irq(&phba->hbalock);
2182 		phba->link_flag &= ~LS_LOOPBACK_MODE;
2183 		spin_unlock_irq(&phba->hbalock);
2184 	}
2185 	lpfc_bsg_diag_mode_exit(phba);
2186
2187 job_error:
2188 	/* make error code available to userspace */
2189 	bsg_reply->result = rc;
2190 	/* complete the job back to userspace if no error */
2191 	if (rc == 0)
2192 		bsg_job_done(job, bsg_reply->result,
2193 			     bsg_reply->reply_payload_rcv_len);
2194 	return rc;
2195 }
2196
2197 /**
2198  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2199  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2200  *
2201  * This function is responsible for checking and dispatching the bsg diag
2202  * loopback-mode command from the user to the proper driver action routine.
2203  */
2204 static int
2205 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2206 {
2207 	struct Scsi_Host *shost;
2208 	struct lpfc_vport *vport;
2209 	struct lpfc_hba *phba;
2210 	int rc;
2211
2212 	shost = fc_bsg_to_shost(job);
2213 	if (!shost)
2214 		return -ENODEV;
2215 	vport = shost_priv(shost);
2216 	if (!vport)
2217 		return -ENODEV;
2218 	phba = vport->phba;
2219 	if (!phba)
2220 		return -ENODEV;
2221
2222 	if (phba->sli_rev < LPFC_SLI_REV4)
2223 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2224 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
2225 		 LPFC_SLI_INTF_IF_TYPE_2)
2226 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2227 	else
2228 		rc = -ENODEV;
2229
2230 	return rc;
2231 }
2232
2233 /**
2234  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2235  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2236  *
2237  * This function is responsible for checking and dispatching the bsg diag
2238  * mode-end command from the user to the proper driver action routine.
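 * On an if_type 2 SLI4 port this clears LS_LOOPBACK_MODE, restores the
 * link diag state to normal operation, and issues a selective reset to
 * drop the port resource registrations set up for the loopback run.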
2239 */ 2240 static int 2241 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) 2242 { 2243 struct fc_bsg_request *bsg_request = job->request; 2244 struct fc_bsg_reply *bsg_reply = job->reply; 2245 struct Scsi_Host *shost; 2246 struct lpfc_vport *vport; 2247 struct lpfc_hba *phba; 2248 struct diag_mode_set *loopback_mode_end_cmd; 2249 uint32_t timeout; 2250 int rc, i; 2251 2252 shost = fc_bsg_to_shost(job); 2253 if (!shost) 2254 return -ENODEV; 2255 vport = shost_priv(shost); 2256 if (!vport) 2257 return -ENODEV; 2258 phba = vport->phba; 2259 if (!phba) 2260 return -ENODEV; 2261 2262 if (phba->sli_rev < LPFC_SLI_REV4) 2263 return -ENODEV; 2264 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2265 LPFC_SLI_INTF_IF_TYPE_2) 2266 return -ENODEV; 2267 2268 /* clear loopback diagnostic mode */ 2269 spin_lock_irq(&phba->hbalock); 2270 phba->link_flag &= ~LS_LOOPBACK_MODE; 2271 spin_unlock_irq(&phba->hbalock); 2272 loopback_mode_end_cmd = (struct diag_mode_set *) 2273 bsg_request->rqst_data.h_vendor.vendor_cmd; 2274 timeout = loopback_mode_end_cmd->timeout * 100; 2275 2276 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2277 if (rc) { 2278 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2279 "3139 Failed to bring link to diagnostic " 2280 "state, rc:x%x\n", rc); 2281 goto loopback_mode_end_exit; 2282 } 2283 2284 /* wait for link down before proceeding */ 2285 i = 0; 2286 while (phba->link_state != LPFC_LINK_DOWN) { 2287 if (i++ > timeout) { 2288 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2289 "3140 Timeout waiting for link to " 2290 "diagnostic mode_end, timeout:%d ms\n", 2291 timeout * 10); 2292 /* there is nothing much we can do here */ 2293 break; 2294 } 2295 msleep(10); 2296 } 2297 2298 /* reset port resource registrations */ 2299 rc = lpfc_selective_reset(phba); 2300 phba->pport->fc_myDID = 0; 2301 2302 loopback_mode_end_exit: 2303 /* make return code available to userspace */ 2304 bsg_reply->result = rc; 2305 /* complete the job back to userspace if no error */ 2306 if (rc == 0) 2307 bsg_job_done(job, bsg_reply->result, 2308 bsg_reply->reply_payload_rcv_len); 2309 return rc; 2310 } 2311 2312 /** 2313 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test 2314 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST 2315 * 2316 * This function is to perform SLI4 diag link test request from the user 2317 * applicaiton. 
2318 */
2319 static int
2320 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2321 {
2322 	struct fc_bsg_request *bsg_request = job->request;
2323 	struct fc_bsg_reply *bsg_reply = job->reply;
2324 	struct Scsi_Host *shost;
2325 	struct lpfc_vport *vport;
2326 	struct lpfc_hba *phba;
2327 	LPFC_MBOXQ_t *pmboxq;
2328 	struct sli4_link_diag *link_diag_test_cmd;
2329 	uint32_t req_len, alloc_len;
2330 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2331 	union lpfc_sli4_cfg_shdr *shdr;
2332 	uint32_t shdr_status, shdr_add_status;
2333 	struct diag_status *diag_status_reply;
2334 	int mbxstatus, rc = 0;
2335
2336 	shost = fc_bsg_to_shost(job);
2337 	if (!shost) {
2338 		rc = -ENODEV;
2339 		goto job_error;
2340 	}
2341 	vport = shost_priv(shost);
2342 	if (!vport) {
2343 		rc = -ENODEV;
2344 		goto job_error;
2345 	}
2346 	phba = vport->phba;
2347 	if (!phba) {
2348 		rc = -ENODEV;
2349 		goto job_error;
2350 	}
2351
2352 	if (phba->sli_rev < LPFC_SLI_REV4) {
2353 		rc = -ENODEV;
2354 		goto job_error;
2355 	}
2356 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2357 	    LPFC_SLI_INTF_IF_TYPE_2) {
2358 		rc = -ENODEV;
2359 		goto job_error;
2360 	}
2361
2362 	if (job->request_len < sizeof(struct fc_bsg_request) +
2363 	    sizeof(struct sli4_link_diag)) {
2364 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2365 				"3013 Received LINK DIAG TEST request "
2366 				"size:%d below the minimum size:%d\n",
2367 				job->request_len,
2368 				(int)(sizeof(struct fc_bsg_request) +
2369 				      sizeof(struct sli4_link_diag)));
2370 		rc = -EINVAL;
2371 		goto job_error;
2372 	}
2373
2374 	if (job->reply_len <
2375 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2376 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2377 				"3012 Received Run link diag test reply "
2378 				"below minimum size (%d): reply_len:%d\n",
2379 				(int)(sizeof(struct fc_bsg_request) +
2380 				      sizeof(struct diag_status)),
2381 				job->reply_len);
2382 		rc = -EINVAL;
2383 		goto job_error;
2384 	}
2385
2386 	rc = lpfc_bsg_diag_mode_enter(phba);
2387 	if (rc)
2388 		goto job_error;
2389
2390 	link_diag_test_cmd = (struct sli4_link_diag *)
2391 		bsg_request->rqst_data.h_vendor.vendor_cmd;
2392
2393 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2394
2395 	if (rc)
2396 		goto job_error;
2397
2398 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2399 	if (!pmboxq) {
2400 		rc = -ENOMEM;
2401 		goto link_diag_test_exit;
2402 	}
2403
2404 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2405 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2406 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2407 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2408 				     req_len, LPFC_SLI4_MBX_EMBED);
2409 	if (alloc_len != req_len) {
2410 		rc = -ENOMEM;
2411 		goto link_diag_test_exit;
2412 	}
2413 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2414 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2415 	       phba->sli4_hba.lnk_info.lnk_no);
2416 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2417 	       phba->sli4_hba.lnk_info.lnk_tp);
2418 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2419 	       link_diag_test_cmd->test_id);
2420 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2421 	       link_diag_test_cmd->loops);
2422 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2423 	       link_diag_test_cmd->test_version);
2424 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2425 	       link_diag_test_cmd->error_action);
2426
2427 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2428
2429 	shdr = (union lpfc_sli4_cfg_shdr *)
2430 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2431 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2432 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2433 	if (shdr_status || shdr_add_status || mbxstatus) {
2434 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2435 				"3010 Run link diag test mailbox failed with "
2436 				"mbx_status x%x status x%x, add_status x%x\n",
2437 				mbxstatus, shdr_status, shdr_add_status);
2438 	}
2439
2440 	diag_status_reply = (struct diag_status *)
2441 			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2442
2443 	diag_status_reply->mbox_status = mbxstatus;
2444 	diag_status_reply->shdr_status = shdr_status;
2445 	diag_status_reply->shdr_add_status = shdr_add_status;
2446
2447 link_diag_test_exit:
2448 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2449
2450 	if (pmboxq)
2451 		mempool_free(pmboxq, phba->mbox_mem_pool);
2452
2453 	lpfc_bsg_diag_mode_exit(phba);
2454
2455 job_error:
2456 	/* make error code available to userspace */
2457 	bsg_reply->result = rc;
2458 	/* complete the job back to userspace if no error */
2459 	if (rc == 0)
2460 		bsg_job_done(job, bsg_reply->result,
2461 			     bsg_reply->reply_payload_rcv_len);
2462 	return rc;
2463 }
2464
2465 /**
2466  * lpfcdiag_loop_self_reg - obtains a remote port login id
2467  * @phba: Pointer to HBA context object
2468  * @rpi: Pointer to a remote port login id
2469  *
2470  * This function obtains a remote port login id so the diag loopback test
2471  * can send and receive its own unsolicited CT command.
2472  **/
2473 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2474 {
2475 	LPFC_MBOXQ_t *mbox;
2476 	struct lpfc_dmabuf *dmabuff;
2477 	int status;
2478
2479 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2480 	if (!mbox)
2481 		return -ENOMEM;
2482
2483 	if (phba->sli_rev < LPFC_SLI_REV4)
2484 		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2485 				      (uint8_t *)&phba->pport->fc_sparam,
2486 				      mbox, *rpi);
2487 	else {
2488 		*rpi = lpfc_sli4_alloc_rpi(phba);
2489 		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2490 				      phba->pport->fc_myDID,
2491 				      (uint8_t *)&phba->pport->fc_sparam,
2492 				      mbox, *rpi);
2493 	}
2494
2495 	if (status) {
2496 		mempool_free(mbox, phba->mbox_mem_pool);
2497 		if (phba->sli_rev == LPFC_SLI_REV4)
2498 			lpfc_sli4_free_rpi(phba, *rpi);
2499 		return -ENOMEM;
2500 	}
2501
2502 	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2503 	mbox->context1 = NULL;
2504 	mbox->context2 = NULL;
2505 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2506
2507 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2508 		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2509 		kfree(dmabuff);
2510 		if (status != MBX_TIMEOUT)
2511 			mempool_free(mbox, phba->mbox_mem_pool);
2512 		if (phba->sli_rev == LPFC_SLI_REV4)
2513 			lpfc_sli4_free_rpi(phba, *rpi);
2514 		return -ENODEV;
2515 	}
2516
2517 	if (phba->sli_rev < LPFC_SLI_REV4)
2518 		*rpi = mbox->u.mb.un.varWords[0];
2519
2520 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2521 	kfree(dmabuff);
2522 	mempool_free(mbox, phba->mbox_mem_pool);
2523 	return 0;
2524 }
2525
2526 /**
2527  * lpfcdiag_loop_self_unreg - unregisters the rpi
2528  * @phba: Pointer to HBA context object
2529  * @rpi: Remote port login id
2530  *
2531  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2532  **/
2533 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2534 {
2535 	LPFC_MBOXQ_t *mbox;
2536 	int status;
2537
2538 	/* Allocate mboxq structure */
2539 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2540 	if (mbox == NULL)
2541 		return -ENOMEM;
2542
2543 	if (phba->sli_rev < LPFC_SLI_REV4)
2544 		lpfc_unreg_login(phba, 0, rpi, mbox);
2545 	else
2546 		lpfc_unreg_login(phba, phba->pport->vpi,
2547 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2548
2549 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2550
2551 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2552 		if (status != MBX_TIMEOUT)
2553 			mempool_free(mbox, phba->mbox_mem_pool);
2554 		return -EIO;
2555 	}
2556 	mempool_free(mbox, phba->mbox_mem_pool);
2557 	if (phba->sli_rev == LPFC_SLI_REV4)
2558 		lpfc_sli4_free_rpi(phba, rpi);
2559 	return 0;
2560 }
2561
2562 /**
2563  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2564  * @phba: Pointer to HBA context object
2565  * @rpi: Remote port login id
2566  * @txxri: Pointer to transmit exchange id
2567  * @rxxri: Pointer to response exchange id
2568  *
2569  * This function obtains the transmit and receive ids required to send
2570  * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2571  * flags are used so the unsolicited response handler is able to process
2572  * the ct command sent on the same port.
2573  **/
2574 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2575 				 uint16_t *txxri, uint16_t *rxxri)
2576 {
2577 	struct lpfc_bsg_event *evt;
2578 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2579 	IOCB_t *cmd, *rsp;
2580 	struct lpfc_dmabuf *dmabuf;
2581 	struct ulp_bde64 *bpl = NULL;
2582 	struct lpfc_sli_ct_request *ctreq = NULL;
2583 	int ret_val = 0;
2584 	int time_left;
2585 	int iocb_stat = IOCB_SUCCESS;
2586 	unsigned long flags;
2587
2588 	*txxri = 0;
2589 	*rxxri = 0;
2590 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2591 				 SLI_CT_ELX_LOOPBACK);
2592 	if (!evt)
2593 		return -ENOMEM;
2594
2595 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2596 	list_add(&evt->node, &phba->ct_ev_waiters);
2597 	lpfc_bsg_event_ref(evt);
2598 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2599
2600 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2601 	rspiocbq = lpfc_sli_get_iocbq(phba);
2602
2603 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2604 	if (dmabuf) {
2605 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2606 		if (dmabuf->virt) {
2607 			INIT_LIST_HEAD(&dmabuf->list);
2608 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2609 			memset(bpl, 0, sizeof(*bpl));
2610 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2611 			bpl->addrHigh =
2612 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2613 					sizeof(*bpl)));
2614 			bpl->addrLow =
2615 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2616 					sizeof(*bpl)));
2617 			bpl->tus.f.bdeFlags = 0;
2618 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2619 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2620 		}
2621 	}
2622
2623 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2624 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2625 	    dmabuf->virt == NULL) {
2626 		ret_val = -ENOMEM;
2627 		goto err_get_xri_exit;
2628 	}
2629
2630 	cmd = &cmdiocbq->iocb;
2631 	rsp = &rspiocbq->iocb;
2632
2633 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2634
2635 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2636 	ctreq->RevisionId.bits.InId = 0;
2637 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2638 	ctreq->FsSubType = 0;
2639 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2640 	ctreq->CommandResponse.bits.Size = 0;
2641
2642
2643 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2644 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2645 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2646 	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2647
2648 	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2649 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2650 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2651 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2652
2653 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2654 	cmd->ulpBdeCount = 1;
2655 	cmd->ulpLe = 1;
2656 	cmd->ulpClass = CLASS3;
2657 	cmd->ulpContext = rpi;
2658
2659 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2660 	cmdiocbq->vport = phba->pport;
2661 	cmdiocbq->iocb_cmpl = NULL;
2662
2663 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2664 					     rspiocbq,
2665 					     (phba->fc_ratov * 2)
2666 					     + LPFC_DRVR_TIMEOUT);
2667 	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2668 		ret_val = -EIO;
2669 		goto err_get_xri_exit;
2670 	}
2671 	*txxri = rsp->ulpContext;
2672
2673 	evt->waiting = 1;
2674 	evt->wait_time_stamp = jiffies;
2675 	time_left = wait_event_interruptible_timeout(
2676 		evt->wq, !list_empty(&evt->events_to_see),
2677 		msecs_to_jiffies(1000 *
2678 			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2679 	if (list_empty(&evt->events_to_see))
2680 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2681 	else {
2682 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2683 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2684 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2685 		*rxxri = (list_entry(evt->events_to_get.prev,
2686 				     typeof(struct event_data),
2687 				     node))->immed_dat;
2688 	}
2689 	evt->waiting = 0;
2690
2691 err_get_xri_exit:
2692 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2693 	lpfc_bsg_event_unref(evt); /* release ref */
2694 	lpfc_bsg_event_unref(evt); /* delete */
2695 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2696
2697 	if (dmabuf) {
2698 		if (dmabuf->virt)
2699 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2700 		kfree(dmabuf);
2701 	}
2702
2703 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2704 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2705 	if (rspiocbq)
2706 		lpfc_sli_release_iocbq(phba, rspiocbq);
2707 	return ret_val;
2708 }
2709
2710 /**
2711  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2712  * @phba: Pointer to HBA context object
2713  *
2714  * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
2715  * returns the pointer to the buffer.
2716  **/
2717 static struct lpfc_dmabuf *
2718 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2719 {
2720 	struct lpfc_dmabuf *dmabuf;
2721 	struct pci_dev *pcidev = phba->pcidev;
2722
2723 	/* allocate dma buffer struct */
2724 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2725 	if (!dmabuf)
2726 		return NULL;
2727
2728 	INIT_LIST_HEAD(&dmabuf->list);
2729
2730 	/* now, allocate dma buffer */
2731 	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2732 					   &(dmabuf->phys), GFP_KERNEL);
2733
2734 	if (!dmabuf->virt) {
2735 		kfree(dmabuf);
2736 		return NULL;
2737 	}
2738
2739 	return dmabuf;
2740 }
2741
2742 /**
2743  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2744  * @phba: Pointer to HBA context object.
2745  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2746  *
2747  * This routine simply frees a dma buffer and its associated buffer
2748  * descriptor referred by @dmabuf.
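 * It is safe to call this routine with a NULL @dmabuf; it simply returns.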
2749 **/
2750 static void
2751 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2752 {
2753 	struct pci_dev *pcidev = phba->pcidev;
2754
2755 	if (!dmabuf)
2756 		return;
2757
2758 	if (dmabuf->virt)
2759 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2760 				  dmabuf->virt, dmabuf->phys);
2761 	kfree(dmabuf);
2762 	return;
2763 }
2764
2765 /**
2766  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2767  * @phba: Pointer to HBA context object.
2768  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2769  *
2770  * This routine simply frees all dma buffers and their associated buffer
2771  * descriptors referred by @dmabuf_list.
2772  **/
2773 static void
2774 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2775 			    struct list_head *dmabuf_list)
2776 {
2777 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2778
2779 	if (list_empty(dmabuf_list))
2780 		return;
2781
2782 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2783 		list_del_init(&dmabuf->list);
2784 		lpfc_bsg_dma_page_free(phba, dmabuf);
2785 	}
2786 	return;
2787 }
2788
2789 /**
2790  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2791  * @phba: Pointer to HBA context object
2792  * @bpl: Pointer to 64 bit bde structure
2793  * @size: Number of bytes to process
2794  * @nocopydata: Flag indicating that user data is not to be copied into the buffers
2795  *
2796  * This function allocates page sized buffers and populates an lpfc_dmabufext.
2797  * Unless @nocopydata is set, the buffers are zeroed in preparation for the
2798  * caller to copy user data into kernel memory. The chained list of page
2799  * sized buffers is returned.
2800  **/
2801 static struct lpfc_dmabufext *
2802 diag_cmd_data_alloc(struct lpfc_hba *phba,
2803 		    struct ulp_bde64 *bpl, uint32_t size,
2804 		    int nocopydata)
2805 {
2806 	struct lpfc_dmabufext *mlist = NULL;
2807 	struct lpfc_dmabufext *dmp;
2808 	int cnt, offset = 0, i = 0;
2809 	struct pci_dev *pcidev;
2810
2811 	pcidev = phba->pcidev;
2812
2813 	while (size) {
2814 		/* We get chunks of 4K */
2815 		if (size > BUF_SZ_4K)
2816 			cnt = BUF_SZ_4K;
2817 		else
2818 			cnt = size;
2819
2820 		/* allocate struct lpfc_dmabufext buffer header */
2821 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2822 		if (!dmp)
2823 			goto out;
2824
2825 		INIT_LIST_HEAD(&dmp->dma.list);
2826
2827 		/* Queue it to a linked list */
2828 		if (mlist)
2829 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2830 		else
2831 			mlist = dmp;
2832
2833 		/* allocate buffer */
2834 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2835 						   cnt,
2836 						   &(dmp->dma.phys),
2837 						   GFP_KERNEL);
2838
2839 		if (!dmp->dma.virt)
2840 			goto out;
2841
2842 		dmp->size = cnt;
2843
2844 		if (nocopydata) {
2845 			bpl->tus.f.bdeFlags = 0;
2846 			pci_dma_sync_single_for_device(phba->pcidev,
2847 				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2848
2849 		} else {
2850 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2851 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2852 		}
2853
2854 		/* build buffer ptr list for IOCB */
2855 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2856 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2857 		bpl->tus.f.bdeSize = (ushort) cnt;
2858 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2859 		bpl++;
2860
2861 		i++;
2862 		offset += cnt;
2863 		size -= cnt;
2864 	}
2865
2866 	if (mlist) {
2867 		mlist->flag = i;
2868 		return mlist;
2869 	}
2870 out:
2871 	diag_cmd_data_free(phba, mlist);
2872 	return NULL;
2873 }
2874
2875 /**
2876  * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2877  * @phba: Pointer to HBA context object
2878  * @rxxri: Receive exchange id
2879  * @len: Number of data bytes
2880  *
2881  * This function allocates and posts a data buffer of sufficient size to
2882  * receive an unsolicited CT command.
2883  **/
2884 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2885 				     size_t len)
2886 {
2887 	struct lpfc_sli_ring *pring;
2888 	struct lpfc_iocbq *cmdiocbq;
2889 	IOCB_t *cmd = NULL;
2890 	struct list_head head, *curr, *next;
2891 	struct lpfc_dmabuf *rxbmp;
2892 	struct lpfc_dmabuf *dmp;
2893 	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2894 	struct ulp_bde64 *rxbpl = NULL;
2895 	uint32_t num_bde;
2896 	struct lpfc_dmabufext *rxbuffer = NULL;
2897 	int ret_val = 0;
2898 	int iocb_stat;
2899 	int i = 0;
2900
2901 	pring = lpfc_phba_elsring(phba);
2902
2903 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2904 	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2905 	if (rxbmp != NULL) {
2906 		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2907 		if (rxbmp->virt) {
2908 			INIT_LIST_HEAD(&rxbmp->list);
2909 			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2910 			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2911 		}
2912 	}
2913
2914 	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2915 		ret_val = -ENOMEM;
2916 		goto err_post_rxbufs_exit;
2917 	}
2918
2919 	/* Queue buffers for the receive exchange */
2920 	num_bde = (uint32_t)rxbuffer->flag;
2921 	dmp = &rxbuffer->dma;
2922
2923 	cmd = &cmdiocbq->iocb;
2924 	i = 0;
2925
2926 	INIT_LIST_HEAD(&head);
2927 	list_add_tail(&head, &dmp->list);
2928 	list_for_each_safe(curr, next, &head) {
2929 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2930 		list_del(curr);
2931
2932 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2933 			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2934 			cmd->un.quexri64cx.buff.bde.addrHigh =
2935 				putPaddrHigh(mp[i]->phys);
2936 			cmd->un.quexri64cx.buff.bde.addrLow =
2937 				putPaddrLow(mp[i]->phys);
2938 			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2939 				((struct lpfc_dmabufext *)mp[i])->size;
2940 			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2941 			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2942 			cmd->ulpPU = 0;
2943 			cmd->ulpLe = 1;
2944 			cmd->ulpBdeCount = 1;
2945 			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2946
2947 		} else {
2948 			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2949 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2950 			cmd->un.cont64[i].tus.f.bdeSize =
2951 				((struct lpfc_dmabufext *)mp[i])->size;
2952 			cmd->ulpBdeCount = ++i;
2953
2954 			if ((--num_bde > 0) && (i < 2))
2955 				continue;
2956
2957 			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2958 			cmd->ulpLe = 1;
2959 		}
2960
2961 		cmd->ulpClass = CLASS3;
2962 		cmd->ulpContext = rxxri;
2963
2964 		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2965 						0);
2966 		if (iocb_stat == IOCB_ERROR) {
2967 			diag_cmd_data_free(phba,
2968 					   (struct lpfc_dmabufext *)mp[0]);
2969 			if (mp[1])
2970 				diag_cmd_data_free(phba,
2971 					   (struct lpfc_dmabufext *)mp[1]);
2972 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2973 			ret_val = -EIO;
2974 			goto err_post_rxbufs_exit;
2975 		}
2976
2977 		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2978 		if (mp[1]) {
2979 			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2980 			mp[1] = NULL;
2981 		}
2982
2983 		/* The iocb was freed by lpfc_sli_issue_iocb */
2984 		cmdiocbq = lpfc_sli_get_iocbq(phba);
2985 		if (!cmdiocbq) {
2986 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2987 			ret_val = -EIO;
2988 			goto err_post_rxbufs_exit;
2989 		}
2990
2991 		cmd = &cmdiocbq->iocb;
2992 		i = 0;
2993 	}
2994 	list_del(&head);
2995
2996 err_post_rxbufs_exit:
2997
2998 	if (rxbmp) {
		if (rxbmp->virt)
2999 			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3000 		kfree(rxbmp);
3001 	}
3002
3003 	if (cmdiocbq)
3004 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3005 	return ret_val;
3006 }
3007
3008 /**
3009  * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
3010  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3011  *
3012  * This function receives a user data buffer to be transmitted and received on
3013  * the same port; the link must be up and in loopback mode prior
3014  * to being called.
3015  * 1. A kernel buffer is allocated to copy the user data into.
3016  * 2. The port registers with "itself".
3017  * 3. The transmit and receive exchange ids are obtained.
3018  * 4. The receive exchange id is posted.
3019  * 5. A new els loopback event is created.
3020  * 6. The command and response iocbs are allocated.
3021  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3022  *
3023  * This function is meant to be called n times while the port is in loopback
3024  * so it is the app's responsibility to issue a reset to take the port out
3025  * of loopback mode.
3026  **/
3027 static int
3028 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3029 {
3030 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3031 	struct fc_bsg_reply *bsg_reply = job->reply;
3032 	struct lpfc_hba *phba = vport->phba;
3033 	struct lpfc_bsg_event *evt;
3034 	struct event_data *evdat;
3035 	struct lpfc_sli *psli = &phba->sli;
3036 	uint32_t size;
3037 	uint32_t full_size;
3038 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3039 	uint16_t rpi = 0;
3040 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3041 	IOCB_t *cmd, *rsp = NULL;
3042 	struct lpfc_sli_ct_request *ctreq;
3043 	struct lpfc_dmabuf *txbmp;
3044 	struct ulp_bde64 *txbpl = NULL;
3045 	struct lpfc_dmabufext *txbuffer = NULL;
3046 	struct list_head head;
3047 	struct lpfc_dmabuf *curr;
3048 	uint16_t txxri = 0, rxxri;
3049 	uint32_t num_bde;
3050 	uint8_t *ptr = NULL, *rx_databuf = NULL;
3051 	int rc = 0;
3052 	int time_left;
3053 	int iocb_stat = IOCB_SUCCESS;
3054 	unsigned long flags;
3055 	void *dataout = NULL;
3056 	uint32_t total_mem;
3057
3058 	/* in case no data is returned return just the return code */
3059 	bsg_reply->reply_payload_rcv_len = 0;
3060
3061 	if (job->request_len <
3062 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3063 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3064 				"2739 Received DIAG TEST request below minimum "
3065 				"size\n");
3066 		rc = -EINVAL;
3067 		goto loopback_test_exit;
3068 	}
3069
3070 	if (job->request_payload.payload_len !=
3071 	    job->reply_payload.payload_len) {
3072 		rc = -EINVAL;
3073 		goto loopback_test_exit;
3074 	}
3075
3076 	if ((phba->link_state == LPFC_HBA_ERROR) ||
3077 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3078 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3079 		rc = -EACCES;
3080 		goto loopback_test_exit;
3081 	}
3082
3083 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3084 		rc = -EACCES;
3085 		goto loopback_test_exit;
3086 	}
3087
3088 	size = job->request_payload.payload_len;
3089 	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */
3090
3091 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3092 		rc = -ERANGE;
3093 		goto loopback_test_exit;
3094 	}
3095
3096 	if (full_size >= BUF_SZ_4K) {
3097 		/*
3098 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3099 		 * then we allocate 64k and re-use that buffer over and over to
3100 		 * xfer the whole block.
This is because Linux kernel has a 3101 * problem allocating more than 120k of kernel space memory. Saw 3102 * problem with GET_FCPTARGETMAPPING... 3103 */ 3104 if (size <= (64 * 1024)) 3105 total_mem = full_size; 3106 else 3107 total_mem = 64 * 1024; 3108 } else 3109 /* Allocate memory for ioctl data */ 3110 total_mem = BUF_SZ_4K; 3111 3112 dataout = kmalloc(total_mem, GFP_KERNEL); 3113 if (dataout == NULL) { 3114 rc = -ENOMEM; 3115 goto loopback_test_exit; 3116 } 3117 3118 ptr = dataout; 3119 ptr += ELX_LOOPBACK_HEADER_SZ; 3120 sg_copy_to_buffer(job->request_payload.sg_list, 3121 job->request_payload.sg_cnt, 3122 ptr, size); 3123 rc = lpfcdiag_loop_self_reg(phba, &rpi); 3124 if (rc) 3125 goto loopback_test_exit; 3126 3127 if (phba->sli_rev < LPFC_SLI_REV4) { 3128 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 3129 if (rc) { 3130 lpfcdiag_loop_self_unreg(phba, rpi); 3131 goto loopback_test_exit; 3132 } 3133 3134 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 3135 if (rc) { 3136 lpfcdiag_loop_self_unreg(phba, rpi); 3137 goto loopback_test_exit; 3138 } 3139 } 3140 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 3141 SLI_CT_ELX_LOOPBACK); 3142 if (!evt) { 3143 lpfcdiag_loop_self_unreg(phba, rpi); 3144 rc = -ENOMEM; 3145 goto loopback_test_exit; 3146 } 3147 3148 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3149 list_add(&evt->node, &phba->ct_ev_waiters); 3150 lpfc_bsg_event_ref(evt); 3151 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3152 3153 cmdiocbq = lpfc_sli_get_iocbq(phba); 3154 if (phba->sli_rev < LPFC_SLI_REV4) 3155 rspiocbq = lpfc_sli_get_iocbq(phba); 3156 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3157 3158 if (txbmp) { 3159 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 3160 if (txbmp->virt) { 3161 INIT_LIST_HEAD(&txbmp->list); 3162 txbpl = (struct ulp_bde64 *) txbmp->virt; 3163 txbuffer = diag_cmd_data_alloc(phba, 3164 txbpl, full_size, 0); 3165 } 3166 } 3167 3168 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 3169 rc = -ENOMEM; 3170 goto err_loopback_test_exit; 3171 } 3172 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 3173 rc = -ENOMEM; 3174 goto err_loopback_test_exit; 3175 } 3176 3177 cmd = &cmdiocbq->iocb; 3178 if (phba->sli_rev < LPFC_SLI_REV4) 3179 rsp = &rspiocbq->iocb; 3180 3181 INIT_LIST_HEAD(&head); 3182 list_add_tail(&head, &txbuffer->dma.list); 3183 list_for_each_entry(curr, &head, list) { 3184 segment_len = ((struct lpfc_dmabufext *)curr)->size; 3185 if (current_offset == 0) { 3186 ctreq = curr->virt; 3187 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 3188 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3189 ctreq->RevisionId.bits.InId = 0; 3190 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 3191 ctreq->FsSubType = 0; 3192 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 3193 ctreq->CommandResponse.bits.Size = size; 3194 segment_offset = ELX_LOOPBACK_HEADER_SZ; 3195 } else 3196 segment_offset = 0; 3197 3198 BUG_ON(segment_offset >= segment_len); 3199 memcpy(curr->virt + segment_offset, 3200 ptr + current_offset, 3201 segment_len - segment_offset); 3202 3203 current_offset += segment_len - segment_offset; 3204 BUG_ON(current_offset > size); 3205 } 3206 list_del(&head); 3207 3208 /* Build the XMIT_SEQUENCE iocb */ 3209 num_bde = (uint32_t)txbuffer->flag; 3210 3211 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 3212 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 3213 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 3214 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct 
ulp_bde64)); 3215 3216 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 3217 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 3218 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 3219 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 3220 3221 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 3222 cmd->ulpBdeCount = 1; 3223 cmd->ulpLe = 1; 3224 cmd->ulpClass = CLASS3; 3225 3226 if (phba->sli_rev < LPFC_SLI_REV4) { 3227 cmd->ulpContext = txxri; 3228 } else { 3229 cmd->un.xseq64.bdl.ulpIoTag32 = 0; 3230 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi]; 3231 cmdiocbq->context3 = txbmp; 3232 cmdiocbq->sli4_xritag = NO_XRI; 3233 cmd->unsli3.rcvsli3.ox_id = 0xffff; 3234 } 3235 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3236 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK; 3237 cmdiocbq->vport = phba->pport; 3238 cmdiocbq->iocb_cmpl = NULL; 3239 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3240 rspiocbq, (phba->fc_ratov * 2) + 3241 LPFC_DRVR_TIMEOUT); 3242 3243 if ((iocb_stat != IOCB_SUCCESS) || 3244 ((phba->sli_rev < LPFC_SLI_REV4) && 3245 (rsp->ulpStatus != IOSTAT_SUCCESS))) { 3246 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3247 "3126 Failed loopback test issue iocb: " 3248 "iocb_stat:x%x\n", iocb_stat); 3249 rc = -EIO; 3250 goto err_loopback_test_exit; 3251 } 3252 3253 evt->waiting = 1; 3254 time_left = wait_event_interruptible_timeout( 3255 evt->wq, !list_empty(&evt->events_to_see), 3256 msecs_to_jiffies(1000 * 3257 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); 3258 evt->waiting = 0; 3259 if (list_empty(&evt->events_to_see)) { 3260 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3261 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3262 "3125 Not receiving unsolicited event, " 3263 "rc:x%x\n", rc); 3264 } else { 3265 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3266 list_move(evt->events_to_see.prev, &evt->events_to_get); 3267 evdat = list_entry(evt->events_to_get.prev, 3268 typeof(*evdat), node); 3269 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3270 rx_databuf = evdat->data; 3271 if (evdat->len != full_size) { 3272 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3273 "1603 Loopback test did not receive expected " 3274 "data length. 
actual length 0x%x expected "
3275 				"length 0x%x\n",
3276 				evdat->len, full_size);
3277 			rc = -EIO;
3278 		} else if (rx_databuf == NULL)
3279 			rc = -EIO;
3280 		else {
3281 			rc = IOCB_SUCCESS;
3282 			/* skip over elx loopback header */
3283 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3284 			bsg_reply->reply_payload_rcv_len =
3285 				sg_copy_from_buffer(job->reply_payload.sg_list,
3286 						    job->reply_payload.sg_cnt,
3287 						    rx_databuf, size);
3288 			bsg_reply->reply_payload_rcv_len = size;
3289 		}
3290 	}
3291
3292 err_loopback_test_exit:
3293 	lpfcdiag_loop_self_unreg(phba, rpi);
3294
3295 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3296 	lpfc_bsg_event_unref(evt); /* release ref */
3297 	lpfc_bsg_event_unref(evt); /* delete */
3298 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3299
3300 	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3301 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3302
3303 	if (rspiocbq != NULL)
3304 		lpfc_sli_release_iocbq(phba, rspiocbq);
3305
3306 	if (txbmp != NULL) {
3307 		if (txbpl != NULL) {
3308 			if (txbuffer != NULL)
3309 				diag_cmd_data_free(phba, txbuffer);
3310 			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3311 		}
3312 		kfree(txbmp);
3313 	}
3314
3315 loopback_test_exit:
3316 	kfree(dataout);
3317 	/* make error code available to userspace */
3318 	bsg_reply->result = rc;
3319 	job->dd_data = NULL;
3320 	/* complete the job back to userspace if no error */
3321 	if (rc == IOCB_SUCCESS)
3322 		bsg_job_done(job, bsg_reply->result,
3323 			     bsg_reply->reply_payload_rcv_len);
3324 	return rc;
3325 }
3326
3327 /**
3328  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3329  * @job: GET_DFC_REV fc_bsg_job
3330  **/
3331 static int
3332 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3333 {
3334 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3335 	struct fc_bsg_reply *bsg_reply = job->reply;
3336 	struct lpfc_hba *phba = vport->phba;
3337 	struct get_mgmt_rev_reply *event_reply;
3338 	int rc = 0;
3339
3340 	if (job->request_len <
3341 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3342 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3343 				"2740 Received GET_DFC_REV request below "
3344 				"minimum size\n");
3345 		rc = -EINVAL;
3346 		goto job_error;
3347 	}
3348
3349 	event_reply = (struct get_mgmt_rev_reply *)
3350 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3351
3352 	if (job->reply_len <
3353 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3354 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3355 				"2741 Received GET_DFC_REV reply below "
3356 				"minimum size\n");
3357 		rc = -EINVAL;
3358 		goto job_error;
3359 	}
3360
3361 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3362 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3363 job_error:
3364 	bsg_reply->result = rc;
3365 	if (rc == 0)
3366 		bsg_job_done(job, bsg_reply->result,
3367 			     bsg_reply->reply_payload_rcv_len);
3368 	return rc;
3369 }
3370
3371 /**
3372  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3373  * @phba: Pointer to HBA context object.
3374  * @pmboxq: Pointer to mailbox command.
3375  *
3376  * This is the completion handler function for mailbox commands issued from
3377  * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
3378  * handler function with no lock held. The bsg_job_data tracking structure
3379  * is retrieved from context1 of the mailbox; the mailbox data is copied
3380  * back to the job, which is then completed if it is still active.
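 * If the job has already been aborted (set_job cleared), the completion
 * data is dropped and only the tracking resources are freed.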
3381 **/ 3382 static void 3383 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3384 { 3385 struct bsg_job_data *dd_data; 3386 struct fc_bsg_reply *bsg_reply; 3387 struct bsg_job *job; 3388 uint32_t size; 3389 unsigned long flags; 3390 uint8_t *pmb, *pmb_buf; 3391 3392 dd_data = pmboxq->context1; 3393 3394 /* 3395 * The outgoing buffer is readily referred from the dma buffer, 3396 * just need to get header part from mailboxq structure. 3397 */ 3398 pmb = (uint8_t *)&pmboxq->u.mb; 3399 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3400 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3401 3402 /* Determine if job has been aborted */ 3403 3404 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3405 job = dd_data->set_job; 3406 if (job) { 3407 /* Prevent timeout handling from trying to abort job */ 3408 job->dd_data = NULL; 3409 } 3410 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3411 3412 /* Copy the mailbox data to the job if it is still active */ 3413 3414 if (job) { 3415 bsg_reply = job->reply; 3416 size = job->reply_payload.payload_len; 3417 bsg_reply->reply_payload_rcv_len = 3418 sg_copy_from_buffer(job->reply_payload.sg_list, 3419 job->reply_payload.sg_cnt, 3420 pmb_buf, size); 3421 } 3422 3423 dd_data->set_job = NULL; 3424 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3425 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3426 kfree(dd_data); 3427 3428 /* Complete the job if the job is still active */ 3429 3430 if (job) { 3431 bsg_reply->result = 0; 3432 bsg_job_done(job, bsg_reply->result, 3433 bsg_reply->reply_payload_rcv_len); 3434 } 3435 return; 3436 } 3437 3438 /** 3439 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3440 * @phba: Pointer to HBA context object. 3441 * @mb: Pointer to a mailbox object. 3442 * @vport: Pointer to a vport object. 3443 * 3444 * Some commands require the port to be offline, some may not be called from 3445 * the application. 
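 * Commands in the offline-only group deliberately fall through into the
 * always-allowed group once the offline check has passed.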
3446 **/
3447 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3448 				     MAILBOX_t *mb, struct lpfc_vport *vport)
3449 {
3450 	/* return negative error values for bsg job */
3451 	switch (mb->mbxCommand) {
3452 	/* Offline only */
3453 	case MBX_INIT_LINK:
3454 	case MBX_DOWN_LINK:
3455 	case MBX_CONFIG_LINK:
3456 	case MBX_CONFIG_RING:
3457 	case MBX_RESET_RING:
3458 	case MBX_UNREG_LOGIN:
3459 	case MBX_CLEAR_LA:
3460 	case MBX_DUMP_CONTEXT:
3461 	case MBX_RUN_DIAGS:
3462 	case MBX_RESTART:
3463 	case MBX_SET_MASK:
3464 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3465 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3466 					"2743 Command 0x%x is illegal in on-line "
3467 					"state\n",
3468 					mb->mbxCommand);
3469 			return -EPERM;
3470 		}	/* fall through */
3471 	case MBX_WRITE_NV:
3472 	case MBX_WRITE_VPARMS:
3473 	case MBX_LOAD_SM:
3474 	case MBX_READ_NV:
3475 	case MBX_READ_CONFIG:
3476 	case MBX_READ_RCONFIG:
3477 	case MBX_READ_STATUS:
3478 	case MBX_READ_XRI:
3479 	case MBX_READ_REV:
3480 	case MBX_READ_LNK_STAT:
3481 	case MBX_DUMP_MEMORY:
3482 	case MBX_DOWN_LOAD:
3483 	case MBX_UPDATE_CFG:
3484 	case MBX_KILL_BOARD:
3485 	case MBX_READ_TOPOLOGY:
3486 	case MBX_LOAD_AREA:
3487 	case MBX_LOAD_EXP_ROM:
3488 	case MBX_BEACON:
3489 	case MBX_DEL_LD_ENTRY:
3490 	case MBX_SET_DEBUG:
3491 	case MBX_WRITE_WWN:
3492 	case MBX_SLI4_CONFIG:
3493 	case MBX_READ_EVENT_LOG:
3494 	case MBX_READ_EVENT_LOG_STATUS:
3495 	case MBX_WRITE_EVENT_LOG:
3496 	case MBX_PORT_CAPABILITIES:
3497 	case MBX_PORT_IOV_CONTROL:
3498 	case MBX_RUN_BIU_DIAG64:
3499 		break;
3500 	case MBX_SET_VARIABLE:
3501 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3502 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3503 			mb->un.varWords[0],
3504 			mb->un.varWords[1]);
3505 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3506 			&& (mb->un.varWords[1] == 1)) {
3507 			phba->wait_4_mlo_maint_flg = 1;
3508 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3509 			spin_lock_irq(&phba->hbalock);
3510 			phba->link_flag &= ~LS_LOOPBACK_MODE;
3511 			spin_unlock_irq(&phba->hbalock);
3512 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3513 		}
3514 		break;
3515 	case MBX_READ_SPARM64:
3516 	case MBX_REG_LOGIN:
3517 	case MBX_REG_LOGIN64:
3518 	case MBX_CONFIG_PORT:
3519 	case MBX_RUN_BIU_DIAG:
3520 	default:
3521 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3522 				"2742 Unknown Command 0x%x\n",
3523 				mb->mbxCommand);
3524 		return -EPERM;
3525 	}
3526
3527 	return 0; /* ok */
3528 }
3529
3530 /**
3531  * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3532  * @phba: Pointer to HBA context object.
3533  *
3534  * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3535  * command session.
3536  **/
3537 static void
3538 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3539 {
3540 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3541 		return;
3542
3543 	/* free all memory, including dma buffers */
3544 	lpfc_bsg_dma_page_list_free(phba,
3545 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3546 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3547 	/* multi-buffer write mailbox command pass-through complete */
3548 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3549 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3550 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3551
3552 	return;
3553 }
3554
3555 /**
3556  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3557  * @phba: Pointer to HBA context object.
3558  * @pmboxq: Pointer to mailbox command.
3559  *
3560  * This routine handles BSG job completion for mailbox commands with
3561  * multiple external buffers.
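 * It returns the bsg_job pointer if the job is still active, or NULL if it
 * has been aborted; the caller remains responsible for freeing the mailbox
 * and completing the job.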
3562 **/
3563 static struct bsg_job *
3564 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3565 {
3566 	struct bsg_job_data *dd_data;
3567 	struct bsg_job *job;
3568 	struct fc_bsg_reply *bsg_reply;
3569 	uint8_t *pmb, *pmb_buf;
3570 	unsigned long flags;
3571 	uint32_t size;
3572 	int rc = 0;
3573 	struct lpfc_dmabuf *dmabuf;
3574 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3575 	uint8_t *pmbx;
3576
3577 	dd_data = pmboxq->context1;
3578
3579 	/* Determine if job has been aborted */
3580 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3581 	job = dd_data->set_job;
3582 	if (job) {
3583 		bsg_reply = job->reply;
3584 		/* Prevent timeout handling from trying to abort job  */
3585 		job->dd_data = NULL;
3586 	}
3587 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3588
3589 	/*
3590 	 * The outgoing buffer is readily referred from the dma buffer,
3591 	 * just need to get header part from mailboxq structure.
3592 	 */
3593
3594 	pmb = (uint8_t *)&pmboxq->u.mb;
3595 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3596 	/* Copy the byte swapped response mailbox back to the user */
3597 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3598 	/* if there is any non-embedded extended data copy that too */
3599 	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3600 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3601 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3602 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3603 		pmbx = (uint8_t *)dmabuf->virt;
3604 		/* byte swap the extended data following the mailbox command */
3605 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3606 			&pmbx[sizeof(MAILBOX_t)],
3607 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3608 	}
3609
3610 	/* Complete the job if the job is still active */
3611
3612 	if (job) {
3613 		size = job->reply_payload.payload_len;
3614 		bsg_reply->reply_payload_rcv_len =
3615 			sg_copy_from_buffer(job->reply_payload.sg_list,
3616 					    job->reply_payload.sg_cnt,
3617 					    pmb_buf, size);
3618
3619 		/* result for successful */
3620 		bsg_reply->result = 0;
3621
3622 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3623 				"2937 SLI_CONFIG ext-buffer mailbox command "
3624 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3625 				phba->mbox_ext_buf_ctx.nembType,
3626 				phba->mbox_ext_buf_ctx.mboxType, size);
3627 		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3628 					phba->mbox_ext_buf_ctx.nembType,
3629 					phba->mbox_ext_buf_ctx.mboxType,
3630 					dma_ebuf, sta_pos_addr,
3631 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3632 	} else {
3633 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3634 				"2938 SLI_CONFIG ext-buffer mailbox "
3635 				"command (x%x/x%x) failure, rc:x%x\n",
3636 				phba->mbox_ext_buf_ctx.nembType,
3637 				phba->mbox_ext_buf_ctx.mboxType, rc);
3638 	}
3639
3640
3641 	/* state change */
3642 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3643 	kfree(dd_data);
3644 	return job;
3645 }
3646
3647 /**
3648  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3649  * @phba: Pointer to HBA context object.
3650  * @pmboxq: Pointer to mailbox command.
3651  *
3652  * This is the completion handler function for mailbox read commands with
3653  * multiple external buffers.
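 * The multi-buffer session context is reset here on mailbox error or when
 * the session used only a single external buffer.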
3654 **/
3655 static void
3656 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3657 {
3658 	struct bsg_job *job;
3659 	struct fc_bsg_reply *bsg_reply;
3660
3661 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3662
3663 	/* handle the BSG job with mailbox command */
3664 	if (!job)
3665 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3666
3667 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3668 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3669 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3670 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3671
3672 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3673 		lpfc_bsg_mbox_ext_session_reset(phba);
3674
3675 	/* free base driver mailbox structure memory */
3676 	mempool_free(pmboxq, phba->mbox_mem_pool);
3677
3678 	/* if the job is still active, call job done */
3679 	if (job) {
3680 		bsg_reply = job->reply;
3681 		bsg_job_done(job, bsg_reply->result,
3682 			     bsg_reply->reply_payload_rcv_len);
3683 	}
3684 	return;
3685 }
3686
3687 /**
3688  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3689  * @phba: Pointer to HBA context object.
3690  * @pmboxq: Pointer to mailbox command.
3691  *
3692  * This is the completion handler function for mailbox write commands with
3693  * multiple external buffers.
3694  **/
3695 static void
3696 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3697 {
3698 	struct bsg_job *job;
3699 	struct fc_bsg_reply *bsg_reply;
3700
3701 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3702
3703 	/* handle the BSG job with the mailbox command */
3704 	if (!job)
3705 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3706
3707 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3708 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3709 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3710 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3711
3712 	/* free all memory, including dma buffers */
3713 	mempool_free(pmboxq, phba->mbox_mem_pool);
3714 	lpfc_bsg_mbox_ext_session_reset(phba);
3715
3716 	/* if the job is still active, call job done */
3717 	if (job) {
3718 		bsg_reply = job->reply;
3719 		bsg_job_done(job, bsg_reply->result,
3720 			     bsg_reply->reply_payload_rcv_len);
3721 	}
3722
3723 	return;
3724 }
3725
3726 static void
3727 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3728 				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3729 				struct lpfc_dmabuf *ext_dmabuf)
3730 {
3731 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3732
3733 	/* pointer to the start of mailbox command */
3734 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3735
3736 	if (nemb_tp == nemb_mse) {
3737 		if (index == 0) {
3738 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3739 				mse[index].pa_hi =
3740 				putPaddrHigh(mbx_dmabuf->phys +
3741 					     sizeof(MAILBOX_t));
3742 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3743 				mse[index].pa_lo =
3744 				putPaddrLow(mbx_dmabuf->phys +
3745 					    sizeof(MAILBOX_t));
3746 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3747 					"2943 SLI_CONFIG(mse)[%d], "
3748 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3749 					index,
3750 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3751 					mse[index].buf_len,
3752 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3753 					mse[index].pa_hi,
3754 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3755 					mse[index].pa_lo);
3756 		} else {
3757 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3758 				mse[index].pa_hi =
3759 				putPaddrHigh(ext_dmabuf->phys);
3761 mse[index].pa_lo = 3762 putPaddrLow(ext_dmabuf->phys); 3763 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3764 "2944 SLI_CONFIG(mse)[%d], " 3765 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3766 index, 3767 sli_cfg_mbx->un.sli_config_emb0_subsys. 3768 mse[index].buf_len, 3769 sli_cfg_mbx->un.sli_config_emb0_subsys. 3770 mse[index].pa_hi, 3771 sli_cfg_mbx->un.sli_config_emb0_subsys. 3772 mse[index].pa_lo); 3773 } 3774 } else { 3775 if (index == 0) { 3776 sli_cfg_mbx->un.sli_config_emb1_subsys. 3777 hbd[index].pa_hi = 3778 putPaddrHigh(mbx_dmabuf->phys + 3779 sizeof(MAILBOX_t)); 3780 sli_cfg_mbx->un.sli_config_emb1_subsys. 3781 hbd[index].pa_lo = 3782 putPaddrLow(mbx_dmabuf->phys + 3783 sizeof(MAILBOX_t)); 3784 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3785 "3007 SLI_CONFIG(hbd)[%d], " 3786 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3787 index, 3788 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3789 &sli_cfg_mbx->un. 3790 sli_config_emb1_subsys.hbd[index]), 3791 sli_cfg_mbx->un.sli_config_emb1_subsys. 3792 hbd[index].pa_hi, 3793 sli_cfg_mbx->un.sli_config_emb1_subsys. 3794 hbd[index].pa_lo); 3795 3796 } else { 3797 sli_cfg_mbx->un.sli_config_emb1_subsys. 3798 hbd[index].pa_hi = 3799 putPaddrHigh(ext_dmabuf->phys); 3800 sli_cfg_mbx->un.sli_config_emb1_subsys. 3801 hbd[index].pa_lo = 3802 putPaddrLow(ext_dmabuf->phys); 3803 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3804 "3008 SLI_CONFIG(hbd)[%d], " 3805 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3806 index, 3807 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3808 &sli_cfg_mbx->un. 3809 sli_config_emb1_subsys.hbd[index]), 3810 sli_cfg_mbx->un.sli_config_emb1_subsys. 3811 hbd[index].pa_hi, 3812 sli_cfg_mbx->un.sli_config_emb1_subsys. 3813 hbd[index].pa_lo); 3814 } 3815 } 3816 return; 3817 } 3818 3819 /** 3820 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read 3821 * @phba: Pointer to HBA context object. 3822 * @mb: Pointer to a BSG mailbox object. 3823 * @nemb_tp: Enumerate of non-embedded mailbox command type. 3824 * @dmabuff: Pointer to a DMA buffer descriptor. 3825 * 3826 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with 3827 * non-embedded external bufffers. 
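 *
 * Returns SLI_CONFIG_HANDLED when the mailbox command has been issued to
 * the port, or a negative error code when validation, allocation, or the
 * mailbox submission fails.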
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Failed SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Failed SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject a non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
	    (nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}

/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @nemb_tp: Enumeration of the non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
 * with non-embedded external buffers.
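 *
 * With a single external buffer the mailbox command is issued to the port
 * immediately; with multiple buffers this routine completes the job so the
 * application can deliver the remaining buffers, and the command is issued
 * once the last buffer arrives (see lpfc_bsg_write_ebuf_set()).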
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log the lengths of the additional external buffers to come */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B with
 * embedded subsystem 0x1 and opcodes with external HBDs.
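 *
 * Returns SLI_CONFIG_HANDLED if the command was dispatched to an extended
 * buffer handler, SLI_CONFIG_NOT_HANDLED if it should be processed as an
 * ordinary pass-through mailbox command, or a negative error code.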
 **/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on new multi-buffer pass-through mailbox command */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
			case FCOE_OPCODE_GET_DPORT_RESULTS:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
			case FCOE_OPCODE_SET_DPORT_MODE:
			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
			case COMN_OPCODE_GET_PROFILE_CONFIG:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}

/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine requests aborting a pass-through mailbox command with
 * multiple external buffers due to an error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}

/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	bsg_reply->result = 0;
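	/*
	 * Completing the job with a zero result here is what prompts the
	 * application to deliver the next external buffer.
	 */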
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles an external buffer for an SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}
	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
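 *
 * Returns SLI_CONFIG_NOT_HANDLED for ordinary mailbox commands that carry
 * no extended-buffer tag, so the caller falls back to the regular
 * single-buffer pass-through path.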
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job to handle.
 * @vport: Pointer to a vport object.
 *
 * Allocates a tracking object, gets a mailbox from the mailbox pool, and
 * copies in the caller's mailbox command.
 *
 * If the port is offline or the SLI layer is not active (the port may be
 * being reset), poll for the command and complete the job inline; otherwise
 * issue the mailbox command and let our completion handler finish the
 * command.
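 *
 * Returns 1 if the mailbox command was issued and will be completed
 * asynchronously, 0 if the command completed inline, or a negative error
 * code on failure.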
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
		    struct lpfc_vport *vport)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* sanity check to protect the driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow mailbox commands to be sent when blocked or when in
	 * the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If the HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
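	 * Note that the command is still submitted; the driver only logs
	 * a warning when another command is attempted in this state.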
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varWords[4-8];
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* receive length cannot be zero */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
						putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
						putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting; mailbox already completed */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}

/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
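 *
 * A zero return means the job was completed (or failed) inline; errors are
 * reported back to the application through bsg_reply->result.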
 **/
static int
lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	bsg_reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				bsg_request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		bsg_reply->result = 0;
		job->dd_data = NULL;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		bsg_reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_menlo_cmd function. It is called by the ring event handler function
 * without any lock held, can be called from both worker thread context and
 * interrupt context, and can also be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the response data into the job's reply payload and
 * completes the BSG job if it is still active.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
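		 * A non-zero ulpStatus below is mapped onto an errno
		 * (-ETIMEDOUT, -EFAULT or -EACCES) before the job is
		 * completed.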
		 */

		menlo_resp = (struct menlo_response *)
			bsg_reply->reply_data.vendor_reply.vendor_rsp;
		menlo_resp->xri = rsp->ulpContext;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}

	}

	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	kfree(dd_data);

	/* Complete the job if active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}

	return;
}

/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned, return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* LE bit: last element of the buffer list */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

static int
lpfc_forced_link_speed(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct forced_link_speed_support_reply *forced_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct get_forced_link_speed_support)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0048 Received FORCED_LINK_SPEED request "
				"below minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply = (struct forced_link_speed_support_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct forced_link_speed_support_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0049 Received FORCED_LINK_SPEED reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
		rc = lpfc_forced_link_speed(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t msgcode;
	int rc;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function, which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* if the job's driver data is NULL, the command completed or is in
	 * the process of completing. In this case, return status to the
	 * request so the timeout is retried. This avoids double completion
	 * issues and the request will be pulled off the timer queue when
	 * the command's completion handler executes. Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */

		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* The SCSI FC transport's fc_bsg_job_timeout expects a zero return
	 * code, otherwise an error message will be displayed on the console;
	 * so always return success (zero).
	 */
	return rc;
}