// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Functions to build and send ELS/CT/BLS commands and responses.
 */

#include "efc.h"
#include "efc_els.h"
#include "../libefc_sli/sli4.h"

#define EFC_LOG_ENABLE_ELS_TRACE(efc)		\
		(((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)

#define node_els_trace()				\
	do {						\
		if (EFC_LOG_ENABLE_ELS_TRACE(efc))	\
			efc_log_info(efc, "[%s] %-20s\n",	\
				     node->display_name, __func__);\
	} while (0)

#define els_io_printf(els, fmt, ...) \
	efc_log_err((struct efc *)els->node->efc,\
		    "[%s] %-8s " fmt, \
		    els->node->display_name,\
		    els->display_name, ##__VA_ARGS__)

#define EFC_ELS_RSP_LEN			1024
#define EFC_ELS_GID_PT_RSP_LEN		8096

struct efc_els_io_req *
efc_els_io_alloc(struct efc_node *node, u32 reqlen)
{
	return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
}

struct efc_els_io_req *
efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
{
	struct efc *efc;
	struct efc_els_io_req *els;
	unsigned long flags = 0;

	efc = node->efc;

	if (!node->els_io_enabled) {
		efc_log_err(efc, "els io alloc disabled\n");
		return NULL;
	}

	els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
	if (!els) {
		atomic_add_return(1, &efc->els_io_alloc_failed_count);
		return NULL;
	}

	/* initialize refcount */
	kref_init(&els->ref);
	els->release = _efc_els_io_free;

	/* populate generic io fields */
	els->node = node;

	/* now allocate DMA for request and response */
	els->io.req.size = reqlen;
	els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
					      &els->io.req.phys, GFP_KERNEL);
	if (!els->io.req.virt) {
		mempool_free(els, efc->els_io_pool);
		return NULL;
	}

	els->io.rsp.size = rsplen;
	els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
					      &els->io.rsp.phys, GFP_KERNEL);
	if (!els->io.rsp.virt) {
		dma_free_coherent(&efc->pci->dev, els->io.req.size,
				  els->io.req.virt, els->io.req.phys);
		mempool_free(els, efc->els_io_pool);
		return NULL;
	}

	/* initialize fields */
	els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;

	/*
	 * Add the els structure to the node's ELS IO list. Only the list
	 * manipulation is done under els_ios_lock; the lock must not be held
	 * across the sleeping DMA allocations above.
	 */
	INIT_LIST_HEAD(&els->list_entry);
	spin_lock_irqsave(&node->els_ios_lock, flags);
	list_add_tail(&els->list_entry, &node->els_ios_list);
	spin_unlock_irqrestore(&node->els_ios_lock, flags);

	return els;
}

void
efc_els_io_free(struct efc_els_io_req *els)
{
	kref_put(&els->ref, els->release);
}

void
_efc_els_io_free(struct kref *arg)
{
	struct efc_els_io_req *els =
		container_of(arg, struct efc_els_io_req, ref);
	struct efc *efc;
	struct efc_node *node;
	bool send_empty_event;
	unsigned long flags = 0;

	node = els->node;
	efc = node->efc;

	spin_lock_irqsave(&node->els_ios_lock, flags);

	list_del(&els->list_entry);
	/*
	 * Send the list-empty event only if the IO allocator is disabled and
	 * the list is now empty. Without the els_io_enabled check, the event
	 * would be posted continually.
	 */
	send_empty_event = (!node->els_io_enabled &&
			    list_empty(&node->els_ios_list));

	spin_unlock_irqrestore(&node->els_ios_lock, flags);

	/* free ELS request and response buffers */
	dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
			  els->io.rsp.virt, els->io.rsp.phys);
	dma_free_coherent(&efc->pci->dev, els->io.req.size,
			  els->io.req.virt, els->io.req.phys);

	mempool_free(els, efc->els_io_pool);

	if (send_empty_event)
		efc_scsi_io_list_empty(node->efc, node);
}
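/*
 * Request retry handling: an LS_RJT with reason code "logical busy" arms a
 * one-shot delay timer (5 seconds); when it expires, efc_els_retry() resends
 * the request. Retries continue until els_retries_remaining (initialized to
 * EFC_FC_ELS_DEFAULT_RETRIES) is exhausted, after which the IO is failed with
 * EFC_EVT_SRRS_ELS_REQ_FAIL.
 */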
static void
efc_els_retry(struct efc_els_io_req *els);

static void
efc_els_delay_timer_cb(struct timer_list *t)
{
	struct efc_els_io_req *els = from_timer(els, t, delay_timer);

	/* Retry delay timer expired, retry the ELS request */
	efc_els_retry(els);
}

static int
efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
{
	struct efc_els_io_req *els;
	struct efc_node *node;
	struct efc *efc;
	struct efc_node_cb cbdata;
	u32 reason_code;

	els = arg;
	node = els->node;
	efc = node->efc;

	if (status)
		els_io_printf(els, "status x%x ext x%x\n", status, ext_status);

	/* set the response len element of els->rsp */
	els->io.rsp.len = length;

	cbdata.status = status;
	cbdata.ext_status = ext_status;
	cbdata.header = NULL;
	cbdata.els_rsp = els->io.rsp;

	/* pass the response length through to the node callback data */
	cbdata.rsp_len = length;

	/*
	 * FW returns the number of bytes received on the link in the WCQE,
	 * not the amount placed in the buffer; use this info to check if
	 * there was an overrun.
	 */
	if (length > els->io.rsp.size) {
		efc_log_warn(efc,
			     "ELS response returned len=%d > buflen=%zu\n",
			     length, els->io.rsp.size);
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
		return 0;
	}

	/* Post event to ELS IO object */
	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
		break;

	case SLI4_FC_WCQE_STATUS_LS_RJT:
		reason_code = (ext_status >> 16) & 0xff;

		/* delay and retry if reason code is Logical Busy */
		switch (reason_code) {
		case ELS_RJT_BUSY:
			els->node->els_req_cnt--;
			els_io_printf(els,
				      "LS_RJT Logical Busy, delay and retry\n");
			timer_setup(&els->delay_timer,
				    efc_els_delay_timer_cb, 0);
			mod_timer(&els->delay_timer,
				  jiffies + msecs_to_jiffies(5000));
			break;
		default:
			efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
					   &cbdata);
			break;
		}
		break;

	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
			efc_els_retry(els);
			break;
		default:
			efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
				    ext_status);
			efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
					   &cbdata);
			break;
		}
		break;
	default:	/* Other error */
		efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
			     status, ext_status);
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
		break;
	}

	return 0;
}

void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
			  u32 ext_status)
{
	struct efc_els_io_req *els =
		container_of(io, struct efc_els_io_req, io);

	WARN_ON_ONCE(!els->cb);

	((efc_hw_srrs_cb_t)els->cb)(els, len, status, ext_status);
}
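/*
 * Common request submission path: fill in the discovery IO addressing
 * (RPI/VPI indicators, source and destination FC_IDs) from the node, hand the
 * IO to the transport via efc->tt.send_els(), and route its completion back
 * through efc_disc_io_complete() to efc_els_req_cb().
 */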
static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
			    enum efc_disc_io_type io_type)
{
	int rc = 0;
	struct efc *efc = node->efc;
	struct efc_node_cb cbdata;

	/* update ELS request counter */
	els->node->els_req_cnt++;

	/* Prepare the IO request details */
	els->io.io_type = io_type;
	els->io.xmit_len = els->io.req.size;
	els->io.rsp_len = els->io.rsp.size;
	els->io.rpi = node->rnode.indicator;
	els->io.vpi = node->nport->indicator;
	els->io.s_id = node->nport->fc_id;
	els->io.d_id = node->rnode.fc_id;

	if (node->rnode.attached)
		els->io.rpi_registered = true;

	els->cb = efc_els_req_cb;

	rc = efc->tt.send_els(efc, &els->io);
	if (!rc)
		return rc;

	cbdata.status = EFC_STATUS_INVALID;
	cbdata.ext_status = EFC_STATUS_INVALID;
	cbdata.els_rsp = els->io.rsp;
	efc_log_err(efc, "efc_els_send failed: %d\n", rc);
	efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);

	return rc;
}

static void
efc_els_retry(struct efc_els_io_req *els)
{
	struct efc *efc;
	struct efc_node_cb cbdata;
	int rc;

	efc = els->node->efc;
	cbdata.status = EFC_STATUS_INVALID;
	cbdata.ext_status = EFC_STATUS_INVALID;
	cbdata.els_rsp = els->io.rsp;

	if (els->els_retries_remaining) {
		els->els_retries_remaining--;
		rc = efc->tt.send_els(efc, &els->io);
	} else {
		rc = -EIO;
	}

	if (rc) {
		efc_log_err(efc, "ELS retries exhausted\n");
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
	}
}

static int
efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
{
	struct efc_els_io_req *els;
	struct efc_node *node;
	struct efc *efc;
	struct efc_node_cb cbdata;

	els = arg;
	node = els->node;
	efc = node->efc;

	cbdata.status = status;
	cbdata.ext_status = ext_status;
	cbdata.header = NULL;
	cbdata.els_rsp = els->io.rsp;

	/* Post node event */
	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
		break;

	default:	/* Other error */
		efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
			     node->display_name, els->display_name,
			     status, ext_status);
		efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
		break;
	}

	return 0;
}

static int
efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
{
	int rc = 0;
	struct efc_node_cb cbdata;
	struct efc_node *node = els->node;
	struct efc *efc = node->efc;

	/* increment ELS completion counter */
	node->els_cmpl_cnt++;

	els->io.io_type = EFC_DISC_IO_ELS_RESP;
	els->cb = efc_els_acc_cb;

	/* Prepare the IO request details */
	els->io.xmit_len = rsplen;
	els->io.rsp_len = els->io.rsp.size;
	els->io.rpi = node->rnode.indicator;
	els->io.vpi = node->nport->indicator;
	if (node->nport->fc_id != U32_MAX)
		els->io.s_id = node->nport->fc_id;
	else
		els->io.s_id = els->io.iparam.els.s_id;
	els->io.d_id = node->rnode.fc_id;

	if (node->attached)
		els->io.rpi_registered = true;

	rc = efc->tt.send_els(efc, &els->io);
	if (!rc)
		return rc;

	cbdata.status = EFC_STATUS_INVALID;
	cbdata.ext_status = EFC_STATUS_INVALID;
	cbdata.els_rsp = els->io.rsp;
	efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);

	return rc;
}
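/*
 * ELS request builders: each routine allocates an ELS IO, formats the request
 * payload directly in the DMA request buffer, and submits it as an
 * EFC_DISC_IO_ELS_REQ. PLOGI/FLOGI/FDISC reuse the nport's cached service
 * parameters and only overwrite the command code.
 */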
int
efc_send_plogi(struct efc_node *node)
{
	struct efc_els_io_req *els;
	struct efc *efc = node->efc;
	struct fc_els_flogi *plogi;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*plogi));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}
	els->display_name = "plogi";

	/* Build PLOGI request */
	plogi = els->io.req.virt;

	memcpy(plogi, node->nport->service_params, sizeof(*plogi));

	plogi->fl_cmd = ELS_PLOGI;
	memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_flogi(struct efc_node *node)
{
	struct efc_els_io_req *els;
	struct efc *efc;
	struct fc_els_flogi *flogi;

	efc = node->efc;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*flogi));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "flogi";

	/* Build FLOGI request */
	flogi = els->io.req.virt;

	memcpy(flogi, node->nport->service_params, sizeof(*flogi));
	flogi->fl_cmd = ELS_FLOGI;
	memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_fdisc(struct efc_node *node)
{
	struct efc_els_io_req *els;
	struct efc *efc;
	struct fc_els_flogi *fdisc;

	efc = node->efc;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*fdisc));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "fdisc";

	/* Build FDISC request */
	fdisc = els->io.req.virt;

	memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
	fdisc->fl_cmd = ELS_FDISC;
	memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_prli(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*pp));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "prli";

	/* Build PRLI request */
	pp = els->io.req.virt;

	memset(pp, 0, sizeof(*pp));

	pp->prli.prli_cmd = ELS_PRLI;
	pp->prli.prli_spp_len = 16;
	pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
	pp->spp.spp_type = FC_TYPE_FCP;
	pp->spp.spp_type_ext = 0;
	pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
	pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
					 (node->nport->enable_ini ?
					  FCP_SPPF_INIT_FCN : 0) |
					 (node->nport->enable_tgt ?
					  FCP_SPPF_TARG_FCN : 0));

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_logo(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els;
	struct fc_els_logo *logo;
	struct fc_els_flogi *sparams;

	node_els_trace();

	sparams = (struct fc_els_flogi *)node->nport->service_params;

	els = efc_els_io_alloc(node, sizeof(*logo));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "logo";

	/* Build LOGO request */

	logo = els->io.req.virt;

	memset(logo, 0, sizeof(*logo));
	logo->fl_cmd = ELS_LOGO;
	hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
	logo->fl_n_port_wwn = sparams->fl_wwpn;

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_adisc(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els;
	struct fc_els_adisc *adisc;
	struct fc_els_flogi *sparams;
	struct efc_nport *nport = node->nport;

	node_els_trace();

	sparams = (struct fc_els_flogi *)node->nport->service_params;

	els = efc_els_io_alloc(node, sizeof(*adisc));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "adisc";

	/* Build ADISC request */

	adisc = els->io.req.virt;

	memset(adisc, 0, sizeof(*adisc));
	adisc->adisc_cmd = ELS_ADISC;
	hton24(adisc->adisc_hard_addr, nport->fc_id);
	adisc->adisc_wwpn = sparams->fl_wwpn;
	adisc->adisc_wwnn = sparams->fl_wwnn;
	hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_scr(struct efc_node *node)
{
	struct efc_els_io_req *els;
	struct efc *efc = node->efc;
	struct fc_els_scr *req;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*req));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "scr";

	req = els->io.req.virt;

	memset(req, 0, sizeof(*req));
	req->scr_cmd = ELS_SCR;
	req->scr_reg_func = ELS_SCRF_FULL;

	return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}
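/*
 * ELS response builders: these run on the responder side and transmit either
 * an LS_RJT or an LS_ACC-based accept for a received command. The
 * originator's OX_ID is carried in io.iparam.els so the reply is matched to
 * the exchange it answers; the payload is built in the request DMA buffer and
 * sent via efc_els_send_rsp().
 */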
int
efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
		u32 reason_code_expl, u32 vendor_unique)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct fc_els_ls_rjt *rjt;

	els = efc_els_io_alloc(node, sizeof(*rjt));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	node_els_trace();

	els->display_name = "ls_rjt";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	rjt = els->io.req.virt;
	memset(rjt, 0, sizeof(*rjt));

	rjt->er_cmd = ELS_LS_RJT;
	rjt->er_reason = reason_code;
	rjt->er_explan = reason_code_expl;

	return efc_els_send_rsp(els, sizeof(*rjt));
}

int
efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct fc_els_flogi *plogi;
	struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*plogi));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "plogi_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	plogi = els->io.req.virt;

	/* copy our port's service parameters to payload */
	memcpy(plogi, node->nport->service_params, sizeof(*plogi));
	plogi->fl_cmd = ELS_LS_ACC;
	memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));

	/* mirror the broadcast support bit if the requester set it */
	if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
		plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);

	return efc_els_send_rsp(els, sizeof(*plogi));
}

int
efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct fc_els_flogi *flogi;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*flogi));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "flogi_p2p_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;
	els->io.iparam.els.s_id = s_id;

	flogi = els->io.req.virt;

	/* copy our port's service parameters to payload */
	memcpy(flogi, node->nport->service_params, sizeof(*flogi));
	flogi->fl_cmd = ELS_LS_ACC;
	memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));

	memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));

	return efc_els_send_rsp(els, sizeof(*flogi));
}

int
efc_send_prli_acc(struct efc_node *node, u32 ox_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*pp));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "prli_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	pp = els->io.req.virt;
	memset(pp, 0, sizeof(*pp));

	pp->prli.prli_cmd = ELS_LS_ACC;
	pp->prli.prli_spp_len = 0x10;
	pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
	pp->spp.spp_type = FC_TYPE_FCP;
	pp->spp.spp_type_ext = 0;
	pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;

	pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
					 (node->nport->enable_ini ?
					  FCP_SPPF_INIT_FCN : 0) |
					 (node->nport->enable_tgt ?
					  FCP_SPPF_TARG_FCN : 0));

	return efc_els_send_rsp(els, sizeof(*pp));
}
int
efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct {
		struct fc_els_prlo prlo;
		struct fc_els_spp spp;
	} *pp;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*pp));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "prlo_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	pp = els->io.req.virt;
	memset(pp, 0, sizeof(*pp));
	pp->prlo.prlo_cmd = ELS_LS_ACC;
	pp->prlo.prlo_obs = 0x10;
	pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));

	pp->spp.spp_type = FC_TYPE_FCP;
	pp->spp.spp_type_ext = 0;
	pp->spp.spp_flags = FC_SPP_RESP_ACK;

	return efc_els_send_rsp(els, sizeof(*pp));
}

int
efc_send_ls_acc(struct efc_node *node, u32 ox_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct fc_els_ls_acc *acc;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*acc));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "ls_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	acc = els->io.req.virt;
	memset(acc, 0, sizeof(*acc));

	acc->la_cmd = ELS_LS_ACC;

	return efc_els_send_rsp(els, sizeof(*acc));
}

int
efc_send_logo_acc(struct efc_node *node, u32 ox_id)
{
	struct efc_els_io_req *els = NULL;
	struct efc *efc = node->efc;
	struct fc_els_ls_acc *logo;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*logo));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "logo_acc";

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	logo = els->io.req.virt;
	memset(logo, 0, sizeof(*logo));

	logo->la_cmd = ELS_LS_ACC;

	return efc_els_send_rsp(els, sizeof(*logo));
}

int
efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els = NULL;
	struct fc_els_adisc *adisc;
	struct fc_els_flogi *sparams;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*adisc));
	if (!els) {
		efc_log_err(efc, "els IO alloc failed\n");
		return -EIO;
	}

	els->display_name = "adisc_acc";

	/* Go ahead and send the ELS_ACC */
	memset(&els->io.iparam, 0, sizeof(els->io.iparam));
	els->io.iparam.els.ox_id = ox_id;

	sparams = (struct fc_els_flogi *)node->nport->service_params;
	adisc = els->io.req.virt;
	memset(adisc, 0, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	adisc->adisc_wwpn = sparams->fl_wwpn;
	adisc->adisc_wwnn = sparams->fl_wwnn;
	hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);

	return efc_els_send_rsp(els, sizeof(*adisc));
}
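/*
 * Fabric name server (FC-CT) requests: fcct_build_req_header() fills the
 * common CT_IU preamble, then RFT_ID registers the FCP FC-4 type, RFF_ID
 * registers the FC-4 features (initiator/target), and GID_PT requests the
 * list of port IDs for the given port type. These are submitted as
 * EFC_DISC_IO_CT_REQ with the CT-specific iparam fields set.
 */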
static inline void
fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
{
	hdr->ct_rev = FC_CT_REV;
	hdr->ct_fs_type = FC_FST_DIR;
	hdr->ct_fs_subtype = FC_NS_SUBTYPE;
	hdr->ct_options = 0;
	hdr->ct_cmd = cpu_to_be16(cmd);
	/* maximum/residual size is expressed in 32-bit words */
	hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
	hdr->ct_reason = 0;
	hdr->ct_explan = 0;
	hdr->ct_vendor = 0;
}

int
efc_ns_send_rftid(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els;
	struct {
		struct fc_ct_hdr hdr;
		struct fc_ns_rft_id rftid;
	} *ct;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*ct));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
	els->io.iparam.ct.type = FC_TYPE_CT;
	els->io.iparam.ct.df_ctl = 0;
	els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

	els->display_name = "rftid";

	ct = els->io.req.virt;
	memset(ct, 0, sizeof(*ct));
	fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
			      sizeof(struct fc_ns_rft_id));

	hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
	ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
		cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));

	return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}

int
efc_ns_send_rffid(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_els_io_req *els;
	struct {
		struct fc_ct_hdr hdr;
		struct fc_ns_rff_id rffid;
	} *ct;

	node_els_trace();

	els = efc_els_io_alloc(node, sizeof(*ct));
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
	els->io.iparam.ct.type = FC_TYPE_CT;
	els->io.iparam.ct.df_ctl = 0;
	els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

	els->display_name = "rffid";
	ct = els->io.req.virt;

	memset(ct, 0, sizeof(*ct));
	fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
			      sizeof(struct fc_ns_rff_id));

	hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
	if (node->nport->enable_ini)
		ct->rffid.fr_feat |= FCP_FEAT_INIT;
	if (node->nport->enable_tgt)
		ct->rffid.fr_feat |= FCP_FEAT_TARG;
	ct->rffid.fr_type = FC_TYPE_FCP;

	return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}

int
efc_ns_send_gidpt(struct efc_node *node)
{
	struct efc_els_io_req *els = NULL;
	struct efc *efc = node->efc;
	struct {
		struct fc_ct_hdr hdr;
		struct fc_ns_gid_pt gidpt;
	} *ct;

	node_els_trace();

	els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
	els->io.iparam.ct.type = FC_TYPE_CT;
	els->io.iparam.ct.df_ctl = 0;
	els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

	els->display_name = "gidpt";

	ct = els->io.req.virt;

	memset(ct, 0, sizeof(*ct));
	fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
			      sizeof(struct fc_ns_gid_pt));

	ct->gidpt.fn_pt_type = FC_TYPE_FCP;

	return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}
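/*
 * efc_els_io_cleanup() terminates an ELS IO: it posts the completion event to
 * the owning node's state machine via efc_node_post_els_resp() and then drops
 * the IO reference. Callers must not touch the IO afterwards.
 */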
void
efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
{
	/*
	 * We don't want further events, e.g. abort requests from the node
	 * state machine, delivered for this IO; mark the request freed so the
	 * ELS state machine is effectively disabled for it.
	 */
	els->els_req_free = true;
	efc_node_post_els_resp(els->node, evt, arg);

	efc_els_io_free(els);
}

static int
efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
{
	struct efc_els_io_req *els = arg;

	efc_els_io_free(els);

	return 0;
}

int
efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
		struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
		u32 reason_code, u32 reason_code_explanation)
{
	struct efc_els_io_req *els = NULL;
	struct fc_ct_hdr *rsp = NULL;

	els = efc_els_io_alloc(node, 256);
	if (!els) {
		efc_log_err(efc, "IO alloc failed\n");
		return -EIO;
	}

	rsp = els->io.rsp.virt;

	*rsp = *ct_hdr;

	fcct_build_req_header(rsp, cmd_rsp_code, 0);
	rsp->ct_reason = reason_code;
	rsp->ct_explan = reason_code_explanation;

	els->display_name = "ct_rsp";
	els->cb = efc_ct_acc_cb;

	/* Prepare the IO request details */
	els->io.io_type = EFC_DISC_IO_CT_RESP;
	els->io.xmit_len = sizeof(*rsp);

	els->io.rpi = node->rnode.indicator;
	els->io.d_id = node->rnode.fc_id;

	memset(&els->io.iparam, 0, sizeof(els->io.iparam));

	els->io.iparam.ct.ox_id = ox_id;
	els->io.iparam.ct.r_ctl = 3;
	els->io.iparam.ct.type = FC_TYPE_CT;
	els->io.iparam.ct.df_ctl = 0;
	els->io.iparam.ct.timeout = 5;

	if (efc->tt.send_els(efc, &els->io)) {
		efc_els_io_free(els);
		return -EIO;
	}
	return 0;
}

int
efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
{
	struct sli_bls_params bls;
	struct fc_ba_acc *acc;
	struct efc *efc = node->efc;

	memset(&bls, 0, sizeof(bls));
	bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
	bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
	bls.s_id = ntoh24(hdr->fh_d_id);
	bls.d_id = node->rnode.fc_id;
	bls.rpi = node->rnode.indicator;
	bls.vpi = node->nport->indicator;

	acc = (void *)bls.payload;
	acc->ba_ox_id = cpu_to_be16(bls.ox_id);
	acc->ba_rx_id = cpu_to_be16(bls.rx_id);
	acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

	return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
}