/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_IOC_FAILURE;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return rc;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return rc;
}

static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}

int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
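/*
 * Many of the handlers below share the same blocking pattern: the request
 * is issued to the HAL under bfad_lock with bfad_hcb_comp as the completion
 * callback, the lock is dropped, and the caller sleeps in
 * wait_for_completion() until the firmware response fills in fcomp.status.
 */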
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
			fcport->cfg.bb_scn_state = BFA_TRUE;
		else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
			fcport->cfg.bb_scn_state = BFA_FALSE;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;
	void *iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
				&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	memcpy((void *)&iocmd->stats.hal_stats,
		(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		sizeof(struct bfa_rport_hal_stats_s));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
		(void *)&fcs_vport->lport.stats,
		sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (cmd == IOCMD_RATELIM_ENABLE)
		fcport->cfg.ratelimit = BFA_TRUE;
	else if (cmd == IOCMD_RATELIM_DISABLE)
		fcport->cfg.ratelimit = BFA_FALSE;

	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

	return 0;
}

int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	fcport->cfg.trl_def_speed = iocmd->speed;
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			memcpy((void *)&iocmd->iostats, (void *)
			       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
			       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
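/*
 * PCI function (ablk) configuration: create or delete a PCI function and
 * update its bandwidth through the adapter block module; each request
 * completes asynchronously through bfad_hcb_comp.
 */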
int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
			iocmd->instance, iocmd->cfg.mode,
			iocmd->cfg.max_pf, iocmd->cfg.max_vf,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
				(struct bfa_bsg_cee_stats_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}
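/*
 * SFP queries: the handlers below wait for the completion only when
 * bfa_sfp_media()/bfa_sfp_speed() return BFA_STATUS_SFP_NOT_READY; any
 * other return value is treated as the final status.
 */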
int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
				iocmd->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

	return 0;
}

int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}

int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
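/*
 * Copy the driver port log (plog_buf) into the payload area that follows
 * the bsg request header; the caller must supply at least
 * sizeof(struct bfa_plog_s) of buffer space.
 */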
int
bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;

	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
		bfa_trc(bfad, sizeof(struct bfa_plog_s));
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd->status = BFA_STATUS_OK;
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
out:
	return 0;
}

#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;
	u32 offset;

	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
	    !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
	    !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	offset = iocmd->offset;
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				&offset, &iocmd->bufsz);
	iocmd->offset = offset;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}

int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
			(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
			(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
		(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
		(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
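/*
 * Report the pre-boot configuration (boot enable, number of boot LUNs,
 * port speed and the boot LUN list) from the IOCFC configuration
 * response held in bfad->bfa.iocfc.cfgrsp.
 */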
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (v_cmd == IOCMD_TRUNK_ENABLE) {
		trunk->attr.state = BFA_TRUNK_OFFLINE;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_TRUE;
	} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
		trunk->attr.state = BFA_TRUNK_DISABLED;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_FALSE;
	}

	if (!bfa_fcport_is_disabled(&bfad->bfa))
		bfa_fcport_enable(&bfad->bfa);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
		sizeof(struct bfa_trunk_attr_s));
	iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
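/*
 * Enable or disable QoS on the base port. The setting is only applied
 * for FC-mode IOCs; for other IOC types the command is accepted but the
 * configuration is left unchanged.
 */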
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if (v_cmd == IOCMD_QOS_ENABLE)
			fcport->cfg.qos_enabled = BFA_TRUE;
		else if (v_cmd == IOCMD_QOS_DISABLE)
			fcport->cfg.qos_enabled = BFA_FALSE;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.state = fcport->qos_attr.state;
	iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
		(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
		(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
		(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
		(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}

int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
		(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
		(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBSC_ENABLE:
	case IOCMD_PORT_BBSC_DISABLE:
		rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Raise the BSG request_queue's segment limit to 256 so that
	 * vendor-command payloads with more scatter-gather segments than
	 * the queue's default can be handled.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}
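/*
 * Usage sketch (illustrative only, not a definitive ABI description):
 * user space typically reaches bfad_im_bsg_vendor_request() by opening
 * the fc_host's bsg node (e.g. /dev/bsg/fc_hostN), submitting a struct
 * sg_io_v4 whose request points to a struct fc_bsg_request with msgcode
 * FC_BSG_HST_VENDOR and the desired IOCMD_* opcode in
 * rqst_data.h_vendor.vendor_cmd[0], and passing the matching
 * bfa_bsg_*_s structure as the data-out payload; the same structure,
 * with its status field filled in, is returned in the data-in payload.
 */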
/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
						  buf_info->size,
						  buf_info->virt,
						  buf_info->phys);
		}
		kfree(buf_base);
	}
}

int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
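/*
 * ELS/CT pass-through entry point. The user payload (a bfa_bsg_fcpt_t)
 * is copied in, the logical port (and remote port, for RPT commands) is
 * looked up, the request/response payloads are copied into DMA-able
 * buffers, and the frame is sent via an FCXP. The thread then blocks on
 * drv_fcxp->comp until bfad_send_fcpt_cb() signals completion, after
 * which the response and status are copied back to user space.
 */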
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else {	/* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			    drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			    drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.
	 */
	return -EAGAIN;
}
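/*
 * Entry-point summary:
 *
 *   bfad_im_bsg_request()
 *     FC_BSG_HST_VENDOR        -> bfad_im_bsg_vendor_request()
 *                                  -> bfad_iocmd_handler() (IOCMD_* dispatch,
 *                                     selected by vendor_cmd[0])
 *     FC_BSG_HST_ELS_NOLOGIN,
 *     FC_BSG_RPT_ELS,
 *     FC_BSG_HST_CT,
 *     FC_BSG_RPT_CT            -> bfad_im_bsg_els_ct_request()
 *
 *   bfad_im_bsg_timeout() returns -EAGAIN so the BSG timeout is rearmed;
 *   ELS/CT pass-through requests are tracked by their own timer.
 */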