/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int	rc = 0;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return rc;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int	rc = 0;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return rc;
	}

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return rc;
}

static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
		;
	for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
		;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}

int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
					iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
	else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = rc;
	return 0;
}

int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status =
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}


static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

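	/* copy the FCS rport stats; overlay HAL rport stats when a bfa_rport is attached */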
	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
			sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
				(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
				(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
	       (void *)&fcs_vport->lport.stats,
		sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
				(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t	*fcs_vf;
	uint32_t	nports = iocmd->nports;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
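
/*
 * Many of the blocking iocmd handlers below share one pattern: arm a
 * bfad_hal_comp completion, issue the request to the IOC while holding
 * bfad_lock, then sleep in wait_for_completion() until bfad_hcb_comp()
 * posts the final status.
 */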

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp	fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
				(struct bfa_bsg_cee_stats_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}

int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
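	/* flash attribute query completes asynchronously; bfad_hcb_comp() fills in fcomp.status */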
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
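	/* wait for the temperature sensor query to complete */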
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
				iocmd->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

	return 0;
}

int
bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
				(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}

int
bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}

int
bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
				(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp,
					&fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
	} else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_diag_dport_show_s *iocmd =
				(struct bfa_bsg_diag_dport_show_s *)pcmd;
	unsigned long	flags;
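	/* D-port test results are read synchronously under bfad_lock; no completion is needed */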

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}


int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}

int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
{ 1985 iocmd->status = BFA_STATUS_VERSION_FAIL; 1986 return 0; 1987 } 1988 1989 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); 1990 init_completion(&fcomp.comp); 1991 spin_lock_irqsave(&bfad->bfad_lock, flags); 1992 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa), 1993 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 1994 0, bfad_hcb_comp, &fcomp); 1995 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1996 if (iocmd->status != BFA_STATUS_OK) 1997 goto out; 1998 wait_for_completion(&fcomp.comp); 1999 iocmd->status = fcomp.status; 2000 out: 2001 return 0; 2002 } 2003 2004 int 2005 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) 2006 { 2007 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; 2008 void *iocmd_bufptr; 2009 2010 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { 2011 bfa_trc(bfad, sizeof(struct bfa_plog_s)); 2012 iocmd->status = BFA_STATUS_EINVAL; 2013 goto out; 2014 } 2015 2016 iocmd->status = BFA_STATUS_OK; 2017 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); 2018 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); 2019 out: 2020 return 0; 2021 } 2022 2023 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ 2024 int 2025 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, 2026 unsigned int payload_len) 2027 { 2028 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; 2029 void *iocmd_bufptr; 2030 unsigned long flags; 2031 u32 offset; 2032 2033 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), 2034 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { 2035 iocmd->status = BFA_STATUS_VERSION_FAIL; 2036 return 0; 2037 } 2038 2039 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || 2040 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || 2041 !IS_ALIGNED(iocmd->offset, sizeof(u32))) { 2042 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); 2043 iocmd->status = BFA_STATUS_EINVAL; 2044 goto out; 2045 } 2046 2047 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); 2048 spin_lock_irqsave(&bfad->bfad_lock, flags); 2049 offset = iocmd->offset; 2050 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, 2051 &offset, &iocmd->bufsz); 2052 iocmd->offset = offset; 2053 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2054 out: 2055 return 0; 2056 } 2057 2058 int 2059 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2060 { 2061 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2062 unsigned long flags; 2063 2064 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { 2065 spin_lock_irqsave(&bfad->bfad_lock, flags); 2066 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; 2067 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2068 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) 2069 bfad->plog_buf.head = bfad->plog_buf.tail = 0; 2070 else if (v_cmd == IOCMD_DEBUG_START_DTRC) 2071 bfa_trc_init(bfad->trcmod); 2072 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) 2073 bfa_trc_stop(bfad->trcmod); 2074 2075 iocmd->status = BFA_STATUS_OK; 2076 return 0; 2077 } 2078 2079 int 2080 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) 2081 { 2082 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; 2083 2084 if (iocmd->ctl == BFA_TRUE) 2085 bfad->plog_buf.plog_enabled = 1; 2086 else 2087 bfad->plog_buf.plog_enabled = 0; 2088 2089 iocmd->status = BFA_STATUS_OK; 2090 return 0; 2091 } 2092 2093 int 2094 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2095 { 2096 struct bfa_bsg_fcpim_profile_s *iocmd = 2097 (struct bfa_bsg_fcpim_profile_s *)cmd; 
2098 struct timeval tv; 2099 unsigned long flags; 2100 2101 do_gettimeofday(&tv); 2102 spin_lock_irqsave(&bfad->bfad_lock, flags); 2103 if (v_cmd == IOCMD_FCPIM_PROFILE_ON) 2104 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec); 2105 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) 2106 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); 2107 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2108 2109 return 0; 2110 } 2111 2112 static int 2113 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) 2114 { 2115 struct bfa_bsg_itnim_ioprofile_s *iocmd = 2116 (struct bfa_bsg_itnim_ioprofile_s *)cmd; 2117 struct bfa_fcs_lport_s *fcs_port; 2118 struct bfa_fcs_itnim_s *itnim; 2119 unsigned long flags; 2120 2121 spin_lock_irqsave(&bfad->bfad_lock, flags); 2122 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 2123 iocmd->vf_id, iocmd->lpwwn); 2124 if (!fcs_port) 2125 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 2126 else { 2127 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 2128 if (itnim == NULL) 2129 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 2130 else 2131 iocmd->status = bfa_itnim_get_ioprofile( 2132 bfa_fcs_itnim_get_halitn(itnim), 2133 &iocmd->ioprofile); 2134 } 2135 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2136 return 0; 2137 } 2138 2139 int 2140 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) 2141 { 2142 struct bfa_bsg_fcport_stats_s *iocmd = 2143 (struct bfa_bsg_fcport_stats_s *)cmd; 2144 struct bfad_hal_comp fcomp; 2145 unsigned long flags; 2146 struct bfa_cb_pending_q_s cb_qe; 2147 2148 init_completion(&fcomp.comp); 2149 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2150 &fcomp, &iocmd->stats); 2151 spin_lock_irqsave(&bfad->bfad_lock, flags); 2152 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2153 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2154 if (iocmd->status != BFA_STATUS_OK) { 2155 bfa_trc(bfad, iocmd->status); 2156 goto out; 2157 } 2158 wait_for_completion(&fcomp.comp); 2159 iocmd->status = fcomp.status; 2160 out: 2161 return 0; 2162 } 2163 2164 int 2165 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) 2166 { 2167 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2168 struct bfad_hal_comp fcomp; 2169 unsigned long flags; 2170 struct bfa_cb_pending_q_s cb_qe; 2171 2172 init_completion(&fcomp.comp); 2173 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); 2174 2175 spin_lock_irqsave(&bfad->bfad_lock, flags); 2176 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2177 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2178 if (iocmd->status != BFA_STATUS_OK) { 2179 bfa_trc(bfad, iocmd->status); 2180 goto out; 2181 } 2182 wait_for_completion(&fcomp.comp); 2183 iocmd->status = fcomp.status; 2184 out: 2185 return 0; 2186 } 2187 2188 int 2189 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) 2190 { 2191 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; 2192 struct bfad_hal_comp fcomp; 2193 unsigned long flags; 2194 2195 init_completion(&fcomp.comp); 2196 spin_lock_irqsave(&bfad->bfad_lock, flags); 2197 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 2198 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, 2199 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2200 bfad_hcb_comp, &fcomp); 2201 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2202 if (iocmd->status != BFA_STATUS_OK) 2203 goto out; 2204 wait_for_completion(&fcomp.comp); 2205 iocmd->status = fcomp.status; 2206 out: 2207 return 0; 2208 } 2209 2210 int 2211 bfad_iocmd_boot_query(struct bfad_s *bfad, void 
*cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Trunking cannot be reconfigured while the port is in D-Port
	 * (diagnostic) mode; drop the lock before bailing out.
	 */
	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else
if (v_cmd == IOCMD_TRUNK_DISABLE) { 2319 trunk->attr.state = BFA_TRUNK_DISABLED; 2320 bfa_fcport_disable(&bfad->bfa); 2321 fcport->cfg.trunked = BFA_FALSE; 2322 } 2323 2324 if (!bfa_fcport_is_disabled(&bfad->bfa)) 2325 bfa_fcport_enable(&bfad->bfa); 2326 2327 iocmd->status = BFA_STATUS_OK; 2328 } 2329 2330 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2331 2332 return 0; 2333 } 2334 2335 int 2336 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) 2337 { 2338 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd; 2339 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2340 struct bfa_fcport_trunk_s *trunk = &fcport->trunk; 2341 unsigned long flags; 2342 2343 spin_lock_irqsave(&bfad->bfad_lock, flags); 2344 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || 2345 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2346 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2347 else { 2348 memcpy((void *)&iocmd->attr, (void *)&trunk->attr, 2349 sizeof(struct bfa_trunk_attr_s)); 2350 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); 2351 iocmd->status = BFA_STATUS_OK; 2352 } 2353 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2354 2355 return 0; 2356 } 2357 2358 int 2359 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2360 { 2361 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2362 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2363 unsigned long flags; 2364 2365 spin_lock_irqsave(&bfad->bfad_lock, flags); 2366 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { 2367 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2368 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2369 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2370 else { 2371 if (v_cmd == IOCMD_QOS_ENABLE) 2372 fcport->cfg.qos_enabled = BFA_TRUE; 2373 else if (v_cmd == IOCMD_QOS_DISABLE) { 2374 fcport->cfg.qos_enabled = BFA_FALSE; 2375 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; 2376 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; 2377 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; 2378 } 2379 } 2380 } 2381 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2382 2383 return 0; 2384 } 2385 2386 int 2387 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) 2388 { 2389 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd; 2390 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2391 unsigned long flags; 2392 2393 spin_lock_irqsave(&bfad->bfad_lock, flags); 2394 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2395 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2396 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2397 else { 2398 iocmd->attr.state = fcport->qos_attr.state; 2399 iocmd->attr.total_bb_cr = 2400 be32_to_cpu(fcport->qos_attr.total_bb_cr); 2401 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; 2402 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; 2403 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; 2404 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; 2405 iocmd->status = BFA_STATUS_OK; 2406 } 2407 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2408 2409 return 0; 2410 } 2411 2412 int 2413 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd) 2414 { 2415 struct bfa_bsg_qos_vc_attr_s *iocmd = 2416 (struct bfa_bsg_qos_vc_attr_s *)cmd; 2417 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2418 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; 2419 unsigned long flags; 2420 u32 i = 0; 2421 2422 spin_lock_irqsave(&bfad->bfad_lock, flags); 2423 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); 
2424 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); 2425 iocmd->attr.elp_opmode_flags = 2426 be32_to_cpu(bfa_vc_attr->elp_opmode_flags); 2427 2428 /* Individual VC info */ 2429 while (i < iocmd->attr.total_vc_count) { 2430 iocmd->attr.vc_info[i].vc_credit = 2431 bfa_vc_attr->vc_info[i].vc_credit; 2432 iocmd->attr.vc_info[i].borrow_credit = 2433 bfa_vc_attr->vc_info[i].borrow_credit; 2434 iocmd->attr.vc_info[i].priority = 2435 bfa_vc_attr->vc_info[i].priority; 2436 i++; 2437 } 2438 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2439 2440 iocmd->status = BFA_STATUS_OK; 2441 return 0; 2442 } 2443 2444 int 2445 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) 2446 { 2447 struct bfa_bsg_fcport_stats_s *iocmd = 2448 (struct bfa_bsg_fcport_stats_s *)cmd; 2449 struct bfad_hal_comp fcomp; 2450 unsigned long flags; 2451 struct bfa_cb_pending_q_s cb_qe; 2452 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2453 2454 init_completion(&fcomp.comp); 2455 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2456 &fcomp, &iocmd->stats); 2457 2458 spin_lock_irqsave(&bfad->bfad_lock, flags); 2459 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2460 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2461 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2462 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2463 else 2464 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2465 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2466 if (iocmd->status != BFA_STATUS_OK) { 2467 bfa_trc(bfad, iocmd->status); 2468 goto out; 2469 } 2470 wait_for_completion(&fcomp.comp); 2471 iocmd->status = fcomp.status; 2472 out: 2473 return 0; 2474 } 2475 2476 int 2477 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) 2478 { 2479 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2480 struct bfad_hal_comp fcomp; 2481 unsigned long flags; 2482 struct bfa_cb_pending_q_s cb_qe; 2483 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2484 2485 init_completion(&fcomp.comp); 2486 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2487 &fcomp, NULL); 2488 2489 spin_lock_irqsave(&bfad->bfad_lock, flags); 2490 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2491 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2492 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2493 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2494 else 2495 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2496 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2497 if (iocmd->status != BFA_STATUS_OK) { 2498 bfa_trc(bfad, iocmd->status); 2499 goto out; 2500 } 2501 wait_for_completion(&fcomp.comp); 2502 iocmd->status = fcomp.status; 2503 out: 2504 return 0; 2505 } 2506 2507 int 2508 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) 2509 { 2510 struct bfa_bsg_vf_stats_s *iocmd = 2511 (struct bfa_bsg_vf_stats_s *)cmd; 2512 struct bfa_fcs_fabric_s *fcs_vf; 2513 unsigned long flags; 2514 2515 spin_lock_irqsave(&bfad->bfad_lock, flags); 2516 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); 2517 if (fcs_vf == NULL) { 2518 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2519 iocmd->status = BFA_STATUS_UNKNOWN_VFID; 2520 goto out; 2521 } 2522 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, 2523 sizeof(struct bfa_vf_stats_s)); 2524 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2525 iocmd->status = BFA_STATUS_OK; 2526 out: 2527 return 0; 2528 } 2529 2530 int 2531 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) 2532 { 2533 struct bfa_bsg_vf_reset_stats_s *iocmd = 2534 
(struct bfa_bsg_vf_reset_stats_s *)cmd; 2535 struct bfa_fcs_fabric_s *fcs_vf; 2536 unsigned long flags; 2537 2538 spin_lock_irqsave(&bfad->bfad_lock, flags); 2539 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); 2540 if (fcs_vf == NULL) { 2541 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2542 iocmd->status = BFA_STATUS_UNKNOWN_VFID; 2543 goto out; 2544 } 2545 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); 2546 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2547 iocmd->status = BFA_STATUS_OK; 2548 out: 2549 return 0; 2550 } 2551 2552 /* Function to reset the LUN SCAN mode */ 2553 static void 2554 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) 2555 { 2556 struct bfad_im_port_s *pport_im = bfad->pport.im_port; 2557 struct bfad_vport_s *vport = NULL; 2558 2559 /* Set the scsi device LUN SCAN flags for base port */ 2560 bfad_reset_sdev_bflags(pport_im, lunmask_cfg); 2561 2562 /* Set the scsi device LUN SCAN flags for the vports */ 2563 list_for_each_entry(vport, &bfad->vport_list, list_entry) 2564 bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); 2565 } 2566 2567 int 2568 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) 2569 { 2570 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 2571 unsigned long flags; 2572 2573 spin_lock_irqsave(&bfad->bfad_lock, flags); 2574 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { 2575 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); 2576 /* Set the LUN Scanning mode to be Sequential scan */ 2577 if (iocmd->status == BFA_STATUS_OK) 2578 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); 2579 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { 2580 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); 2581 /* Set the LUN Scanning mode to default REPORT_LUNS scan */ 2582 if (iocmd->status == BFA_STATUS_OK) 2583 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); 2584 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) 2585 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); 2586 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2587 return 0; 2588 } 2589 2590 int 2591 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) 2592 { 2593 struct bfa_bsg_fcpim_lunmask_query_s *iocmd = 2594 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; 2595 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; 2596 unsigned long flags; 2597 2598 spin_lock_irqsave(&bfad->bfad_lock, flags); 2599 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); 2600 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2601 return 0; 2602 } 2603 2604 int 2605 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2606 { 2607 struct bfa_bsg_fcpim_lunmask_s *iocmd = 2608 (struct bfa_bsg_fcpim_lunmask_s *)cmd; 2609 unsigned long flags; 2610 2611 spin_lock_irqsave(&bfad->bfad_lock, flags); 2612 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) 2613 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, 2614 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); 2615 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) 2616 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, 2617 iocmd->vf_id, &iocmd->pwwn, 2618 iocmd->rpwwn, iocmd->lun); 2619 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2620 return 0; 2621 } 2622 2623 int 2624 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) 2625 { 2626 struct bfa_bsg_fcpim_throttle_s *iocmd = 2627 (struct bfa_bsg_fcpim_throttle_s *)cmd; 2628 unsigned long flags; 2629 2630 spin_lock_irqsave(&bfad->bfad_lock, 
flags); 2631 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, 2632 (void *)&iocmd->throttle); 2633 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2634 2635 return 0; 2636 } 2637 2638 int 2639 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) 2640 { 2641 struct bfa_bsg_fcpim_throttle_s *iocmd = 2642 (struct bfa_bsg_fcpim_throttle_s *)cmd; 2643 unsigned long flags; 2644 2645 spin_lock_irqsave(&bfad->bfad_lock, flags); 2646 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, 2647 iocmd->throttle.cfg_value); 2648 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2649 2650 return 0; 2651 } 2652 2653 int 2654 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) 2655 { 2656 struct bfa_bsg_tfru_s *iocmd = 2657 (struct bfa_bsg_tfru_s *)cmd; 2658 struct bfad_hal_comp fcomp; 2659 unsigned long flags = 0; 2660 2661 init_completion(&fcomp.comp); 2662 spin_lock_irqsave(&bfad->bfad_lock, flags); 2663 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), 2664 &iocmd->data, iocmd->len, iocmd->offset, 2665 bfad_hcb_comp, &fcomp); 2666 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2667 if (iocmd->status == BFA_STATUS_OK) { 2668 wait_for_completion(&fcomp.comp); 2669 iocmd->status = fcomp.status; 2670 } 2671 2672 return 0; 2673 } 2674 2675 int 2676 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) 2677 { 2678 struct bfa_bsg_tfru_s *iocmd = 2679 (struct bfa_bsg_tfru_s *)cmd; 2680 struct bfad_hal_comp fcomp; 2681 unsigned long flags = 0; 2682 2683 init_completion(&fcomp.comp); 2684 spin_lock_irqsave(&bfad->bfad_lock, flags); 2685 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), 2686 &iocmd->data, iocmd->len, iocmd->offset, 2687 bfad_hcb_comp, &fcomp); 2688 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2689 if (iocmd->status == BFA_STATUS_OK) { 2690 wait_for_completion(&fcomp.comp); 2691 iocmd->status = fcomp.status; 2692 } 2693 2694 return 0; 2695 } 2696 2697 int 2698 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) 2699 { 2700 struct bfa_bsg_fruvpd_s *iocmd = 2701 (struct bfa_bsg_fruvpd_s *)cmd; 2702 struct bfad_hal_comp fcomp; 2703 unsigned long flags = 0; 2704 2705 init_completion(&fcomp.comp); 2706 spin_lock_irqsave(&bfad->bfad_lock, flags); 2707 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), 2708 &iocmd->data, iocmd->len, iocmd->offset, 2709 bfad_hcb_comp, &fcomp); 2710 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2711 if (iocmd->status == BFA_STATUS_OK) { 2712 wait_for_completion(&fcomp.comp); 2713 iocmd->status = fcomp.status; 2714 } 2715 2716 return 0; 2717 } 2718 2719 int 2720 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) 2721 { 2722 struct bfa_bsg_fruvpd_s *iocmd = 2723 (struct bfa_bsg_fruvpd_s *)cmd; 2724 struct bfad_hal_comp fcomp; 2725 unsigned long flags = 0; 2726 2727 init_completion(&fcomp.comp); 2728 spin_lock_irqsave(&bfad->bfad_lock, flags); 2729 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), 2730 &iocmd->data, iocmd->len, iocmd->offset, 2731 bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl); 2732 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2733 if (iocmd->status == BFA_STATUS_OK) { 2734 wait_for_completion(&fcomp.comp); 2735 iocmd->status = fcomp.status; 2736 } 2737 2738 return 0; 2739 } 2740 2741 int 2742 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) 2743 { 2744 struct bfa_bsg_fruvpd_max_size_s *iocmd = 2745 (struct bfa_bsg_fruvpd_max_size_s *)cmd; 2746 unsigned long flags = 0; 2747 2748 spin_lock_irqsave(&bfad->bfad_lock, flags); 2749 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), 2750 &iocmd->max_size); 
2751 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2752 2753 return 0; 2754 } 2755 2756 static int 2757 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2758 unsigned int payload_len) 2759 { 2760 int rc = -EINVAL; 2761 2762 switch (cmd) { 2763 case IOCMD_IOC_ENABLE: 2764 rc = bfad_iocmd_ioc_enable(bfad, iocmd); 2765 break; 2766 case IOCMD_IOC_DISABLE: 2767 rc = bfad_iocmd_ioc_disable(bfad, iocmd); 2768 break; 2769 case IOCMD_IOC_GET_INFO: 2770 rc = bfad_iocmd_ioc_get_info(bfad, iocmd); 2771 break; 2772 case IOCMD_IOC_GET_ATTR: 2773 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); 2774 break; 2775 case IOCMD_IOC_GET_STATS: 2776 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); 2777 break; 2778 case IOCMD_IOC_GET_FWSTATS: 2779 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); 2780 break; 2781 case IOCMD_IOC_RESET_STATS: 2782 case IOCMD_IOC_RESET_FWSTATS: 2783 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); 2784 break; 2785 case IOCMD_IOC_SET_ADAPTER_NAME: 2786 case IOCMD_IOC_SET_PORT_NAME: 2787 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); 2788 break; 2789 case IOCMD_IOCFC_GET_ATTR: 2790 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); 2791 break; 2792 case IOCMD_IOCFC_SET_INTR: 2793 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); 2794 break; 2795 case IOCMD_PORT_ENABLE: 2796 rc = bfad_iocmd_port_enable(bfad, iocmd); 2797 break; 2798 case IOCMD_PORT_DISABLE: 2799 rc = bfad_iocmd_port_disable(bfad, iocmd); 2800 break; 2801 case IOCMD_PORT_GET_ATTR: 2802 rc = bfad_iocmd_port_get_attr(bfad, iocmd); 2803 break; 2804 case IOCMD_PORT_GET_STATS: 2805 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); 2806 break; 2807 case IOCMD_PORT_RESET_STATS: 2808 rc = bfad_iocmd_port_reset_stats(bfad, iocmd); 2809 break; 2810 case IOCMD_PORT_CFG_TOPO: 2811 case IOCMD_PORT_CFG_SPEED: 2812 case IOCMD_PORT_CFG_ALPA: 2813 case IOCMD_PORT_CLR_ALPA: 2814 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); 2815 break; 2816 case IOCMD_PORT_CFG_MAXFRSZ: 2817 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); 2818 break; 2819 case IOCMD_PORT_BBCR_ENABLE: 2820 case IOCMD_PORT_BBCR_DISABLE: 2821 rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd); 2822 break; 2823 case IOCMD_PORT_BBCR_GET_ATTR: 2824 rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd); 2825 break; 2826 case IOCMD_LPORT_GET_ATTR: 2827 rc = bfad_iocmd_lport_get_attr(bfad, iocmd); 2828 break; 2829 case IOCMD_LPORT_GET_STATS: 2830 rc = bfad_iocmd_lport_get_stats(bfad, iocmd); 2831 break; 2832 case IOCMD_LPORT_RESET_STATS: 2833 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); 2834 break; 2835 case IOCMD_LPORT_GET_IOSTATS: 2836 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); 2837 break; 2838 case IOCMD_LPORT_GET_RPORTS: 2839 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); 2840 break; 2841 case IOCMD_RPORT_GET_ATTR: 2842 rc = bfad_iocmd_rport_get_attr(bfad, iocmd); 2843 break; 2844 case IOCMD_RPORT_GET_ADDR: 2845 rc = bfad_iocmd_rport_get_addr(bfad, iocmd); 2846 break; 2847 case IOCMD_RPORT_GET_STATS: 2848 rc = bfad_iocmd_rport_get_stats(bfad, iocmd); 2849 break; 2850 case IOCMD_RPORT_RESET_STATS: 2851 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd); 2852 break; 2853 case IOCMD_RPORT_SET_SPEED: 2854 rc = bfad_iocmd_rport_set_speed(bfad, iocmd); 2855 break; 2856 case IOCMD_VPORT_GET_ATTR: 2857 rc = bfad_iocmd_vport_get_attr(bfad, iocmd); 2858 break; 2859 case IOCMD_VPORT_GET_STATS: 2860 rc = bfad_iocmd_vport_get_stats(bfad, iocmd); 2861 break; 2862 case IOCMD_VPORT_RESET_STATS: 2863 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); 2864 break; 2865 
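	/* Fabric, rate-limit, FCP initiator-mode (FCPIM/ITNIM) and FC port commands */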
case IOCMD_FABRIC_GET_LPORTS: 2866 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); 2867 break; 2868 case IOCMD_RATELIM_ENABLE: 2869 case IOCMD_RATELIM_DISABLE: 2870 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); 2871 break; 2872 case IOCMD_RATELIM_DEF_SPEED: 2873 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd); 2874 break; 2875 case IOCMD_FCPIM_FAILOVER: 2876 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); 2877 break; 2878 case IOCMD_FCPIM_MODSTATS: 2879 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); 2880 break; 2881 case IOCMD_FCPIM_MODSTATSCLR: 2882 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); 2883 break; 2884 case IOCMD_FCPIM_DEL_ITN_STATS: 2885 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); 2886 break; 2887 case IOCMD_ITNIM_GET_ATTR: 2888 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); 2889 break; 2890 case IOCMD_ITNIM_GET_IOSTATS: 2891 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); 2892 break; 2893 case IOCMD_ITNIM_RESET_STATS: 2894 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); 2895 break; 2896 case IOCMD_ITNIM_GET_ITNSTATS: 2897 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); 2898 break; 2899 case IOCMD_FCPORT_ENABLE: 2900 rc = bfad_iocmd_fcport_enable(bfad, iocmd); 2901 break; 2902 case IOCMD_FCPORT_DISABLE: 2903 rc = bfad_iocmd_fcport_disable(bfad, iocmd); 2904 break; 2905 case IOCMD_IOC_PCIFN_CFG: 2906 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); 2907 break; 2908 case IOCMD_IOC_FW_SIG_INV: 2909 rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); 2910 break; 2911 case IOCMD_PCIFN_CREATE: 2912 rc = bfad_iocmd_pcifn_create(bfad, iocmd); 2913 break; 2914 case IOCMD_PCIFN_DELETE: 2915 rc = bfad_iocmd_pcifn_delete(bfad, iocmd); 2916 break; 2917 case IOCMD_PCIFN_BW: 2918 rc = bfad_iocmd_pcifn_bw(bfad, iocmd); 2919 break; 2920 case IOCMD_ADAPTER_CFG_MODE: 2921 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); 2922 break; 2923 case IOCMD_PORT_CFG_MODE: 2924 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); 2925 break; 2926 case IOCMD_FLASH_ENABLE_OPTROM: 2927 case IOCMD_FLASH_DISABLE_OPTROM: 2928 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); 2929 break; 2930 case IOCMD_FAA_QUERY: 2931 rc = bfad_iocmd_faa_query(bfad, iocmd); 2932 break; 2933 case IOCMD_CEE_GET_ATTR: 2934 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); 2935 break; 2936 case IOCMD_CEE_GET_STATS: 2937 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); 2938 break; 2939 case IOCMD_CEE_RESET_STATS: 2940 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); 2941 break; 2942 case IOCMD_SFP_MEDIA: 2943 rc = bfad_iocmd_sfp_media(bfad, iocmd); 2944 break; 2945 case IOCMD_SFP_SPEED: 2946 rc = bfad_iocmd_sfp_speed(bfad, iocmd); 2947 break; 2948 case IOCMD_FLASH_GET_ATTR: 2949 rc = bfad_iocmd_flash_get_attr(bfad, iocmd); 2950 break; 2951 case IOCMD_FLASH_ERASE_PART: 2952 rc = bfad_iocmd_flash_erase_part(bfad, iocmd); 2953 break; 2954 case IOCMD_FLASH_UPDATE_PART: 2955 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); 2956 break; 2957 case IOCMD_FLASH_READ_PART: 2958 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); 2959 break; 2960 case IOCMD_DIAG_TEMP: 2961 rc = bfad_iocmd_diag_temp(bfad, iocmd); 2962 break; 2963 case IOCMD_DIAG_MEMTEST: 2964 rc = bfad_iocmd_diag_memtest(bfad, iocmd); 2965 break; 2966 case IOCMD_DIAG_LOOPBACK: 2967 rc = bfad_iocmd_diag_loopback(bfad, iocmd); 2968 break; 2969 case IOCMD_DIAG_FWPING: 2970 rc = bfad_iocmd_diag_fwping(bfad, iocmd); 2971 break; 2972 case IOCMD_DIAG_QUEUETEST: 2973 rc = bfad_iocmd_diag_queuetest(bfad, iocmd); 2974 break; 2975 case IOCMD_DIAG_SFP: 2976 rc = 
bfad_iocmd_diag_sfp(bfad, iocmd); 2977 break; 2978 case IOCMD_DIAG_LED: 2979 rc = bfad_iocmd_diag_led(bfad, iocmd); 2980 break; 2981 case IOCMD_DIAG_BEACON_LPORT: 2982 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); 2983 break; 2984 case IOCMD_DIAG_LB_STAT: 2985 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); 2986 break; 2987 case IOCMD_DIAG_DPORT_ENABLE: 2988 rc = bfad_iocmd_diag_dport_enable(bfad, iocmd); 2989 break; 2990 case IOCMD_DIAG_DPORT_DISABLE: 2991 rc = bfad_iocmd_diag_dport_disable(bfad, iocmd); 2992 break; 2993 case IOCMD_DIAG_DPORT_SHOW: 2994 rc = bfad_iocmd_diag_dport_show(bfad, iocmd); 2995 break; 2996 case IOCMD_DIAG_DPORT_START: 2997 rc = bfad_iocmd_diag_dport_start(bfad, iocmd); 2998 break; 2999 case IOCMD_PHY_GET_ATTR: 3000 rc = bfad_iocmd_phy_get_attr(bfad, iocmd); 3001 break; 3002 case IOCMD_PHY_GET_STATS: 3003 rc = bfad_iocmd_phy_get_stats(bfad, iocmd); 3004 break; 3005 case IOCMD_PHY_UPDATE_FW: 3006 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); 3007 break; 3008 case IOCMD_PHY_READ_FW: 3009 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); 3010 break; 3011 case IOCMD_VHBA_QUERY: 3012 rc = bfad_iocmd_vhba_query(bfad, iocmd); 3013 break; 3014 case IOCMD_DEBUG_PORTLOG: 3015 rc = bfad_iocmd_porglog_get(bfad, iocmd); 3016 break; 3017 case IOCMD_DEBUG_FW_CORE: 3018 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); 3019 break; 3020 case IOCMD_DEBUG_FW_STATE_CLR: 3021 case IOCMD_DEBUG_PORTLOG_CLR: 3022 case IOCMD_DEBUG_START_DTRC: 3023 case IOCMD_DEBUG_STOP_DTRC: 3024 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); 3025 break; 3026 case IOCMD_DEBUG_PORTLOG_CTL: 3027 rc = bfad_iocmd_porglog_ctl(bfad, iocmd); 3028 break; 3029 case IOCMD_FCPIM_PROFILE_ON: 3030 case IOCMD_FCPIM_PROFILE_OFF: 3031 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); 3032 break; 3033 case IOCMD_ITNIM_GET_IOPROFILE: 3034 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); 3035 break; 3036 case IOCMD_FCPORT_GET_STATS: 3037 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); 3038 break; 3039 case IOCMD_FCPORT_RESET_STATS: 3040 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); 3041 break; 3042 case IOCMD_BOOT_CFG: 3043 rc = bfad_iocmd_boot_cfg(bfad, iocmd); 3044 break; 3045 case IOCMD_BOOT_QUERY: 3046 rc = bfad_iocmd_boot_query(bfad, iocmd); 3047 break; 3048 case IOCMD_PREBOOT_QUERY: 3049 rc = bfad_iocmd_preboot_query(bfad, iocmd); 3050 break; 3051 case IOCMD_ETHBOOT_CFG: 3052 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); 3053 break; 3054 case IOCMD_ETHBOOT_QUERY: 3055 rc = bfad_iocmd_ethboot_query(bfad, iocmd); 3056 break; 3057 case IOCMD_TRUNK_ENABLE: 3058 case IOCMD_TRUNK_DISABLE: 3059 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); 3060 break; 3061 case IOCMD_TRUNK_GET_ATTR: 3062 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); 3063 break; 3064 case IOCMD_QOS_ENABLE: 3065 case IOCMD_QOS_DISABLE: 3066 rc = bfad_iocmd_qos(bfad, iocmd, cmd); 3067 break; 3068 case IOCMD_QOS_GET_ATTR: 3069 rc = bfad_iocmd_qos_get_attr(bfad, iocmd); 3070 break; 3071 case IOCMD_QOS_GET_VC_ATTR: 3072 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); 3073 break; 3074 case IOCMD_QOS_GET_STATS: 3075 rc = bfad_iocmd_qos_get_stats(bfad, iocmd); 3076 break; 3077 case IOCMD_QOS_RESET_STATS: 3078 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); 3079 break; 3080 case IOCMD_QOS_SET_BW: 3081 rc = bfad_iocmd_qos_set_bw(bfad, iocmd); 3082 break; 3083 case IOCMD_VF_GET_STATS: 3084 rc = bfad_iocmd_vf_get_stats(bfad, iocmd); 3085 break; 3086 case IOCMD_VF_RESET_STATS: 3087 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); 3088 break; 3089 case 
IOCMD_FCPIM_LUNMASK_ENABLE: 3090 case IOCMD_FCPIM_LUNMASK_DISABLE: 3091 case IOCMD_FCPIM_LUNMASK_CLEAR: 3092 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); 3093 break; 3094 case IOCMD_FCPIM_LUNMASK_QUERY: 3095 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); 3096 break; 3097 case IOCMD_FCPIM_LUNMASK_ADD: 3098 case IOCMD_FCPIM_LUNMASK_DELETE: 3099 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); 3100 break; 3101 case IOCMD_FCPIM_THROTTLE_QUERY: 3102 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); 3103 break; 3104 case IOCMD_FCPIM_THROTTLE_SET: 3105 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); 3106 break; 3107 /* TFRU */ 3108 case IOCMD_TFRU_READ: 3109 rc = bfad_iocmd_tfru_read(bfad, iocmd); 3110 break; 3111 case IOCMD_TFRU_WRITE: 3112 rc = bfad_iocmd_tfru_write(bfad, iocmd); 3113 break; 3114 /* FRU */ 3115 case IOCMD_FRUVPD_READ: 3116 rc = bfad_iocmd_fruvpd_read(bfad, iocmd); 3117 break; 3118 case IOCMD_FRUVPD_UPDATE: 3119 rc = bfad_iocmd_fruvpd_update(bfad, iocmd); 3120 break; 3121 case IOCMD_FRUVPD_GET_MAX_SIZE: 3122 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); 3123 break; 3124 default: 3125 rc = -EINVAL; 3126 break; 3127 } 3128 return rc; 3129 } 3130 3131 static int 3132 bfad_im_bsg_vendor_request(struct fc_bsg_job *job) 3133 { 3134 uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0]; 3135 struct bfad_im_port_s *im_port = 3136 (struct bfad_im_port_s *) job->shost->hostdata[0]; 3137 struct bfad_s *bfad = im_port->bfad; 3138 struct request_queue *request_q = job->req->q; 3139 void *payload_kbuf; 3140 int rc = -EINVAL; 3141 3142 /* 3143 * Set the BSG device request_queue size to 256 to support 3144 * payloads larger than 512*1024K bytes. 3145 */ 3146 blk_queue_max_segments(request_q, 256); 3147 3148 /* Allocate a temp buffer to hold the passed in user space command */ 3149 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); 3150 if (!payload_kbuf) { 3151 rc = -ENOMEM; 3152 goto out; 3153 } 3154 3155 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ 3156 sg_copy_to_buffer(job->request_payload.sg_list, 3157 job->request_payload.sg_cnt, payload_kbuf, 3158 job->request_payload.payload_len); 3159 3160 /* Invoke IOCMD handler - to handle all the vendor command requests */ 3161 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, 3162 job->request_payload.payload_len); 3163 if (rc != BFA_STATUS_OK) 3164 goto error; 3165 3166 /* Copy the response data to the job->reply_payload sg_list */ 3167 sg_copy_from_buffer(job->reply_payload.sg_list, 3168 job->reply_payload.sg_cnt, 3169 payload_kbuf, 3170 job->reply_payload.payload_len); 3171 3172 /* free the command buffer */ 3173 kfree(payload_kbuf); 3174 3175 /* Fill the BSG job reply data */ 3176 job->reply_len = job->reply_payload.payload_len; 3177 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 3178 job->reply->result = rc; 3179 3180 job->job_done(job); 3181 return rc; 3182 error: 3183 /* free the command buffer */ 3184 kfree(payload_kbuf); 3185 out: 3186 job->reply->result = rc; 3187 job->reply_len = sizeof(uint32_t); 3188 job->reply->reply_payload_rcv_len = 0; 3189 return rc; 3190 } 3191 3192 /* FC passthru call backs */ 3193 u64 3194 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) 3195 { 3196 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3197 struct bfa_sge_s *sge; 3198 u64 addr; 3199 3200 sge = drv_fcxp->req_sge + sgeid; 3201 addr = (u64)(size_t) sge->sg_addr; 3202 return addr; 3203 } 3204 3205 u32 3206 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int 
sgeid) 3207 { 3208 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3209 struct bfa_sge_s *sge; 3210 3211 sge = drv_fcxp->req_sge + sgeid; 3212 return sge->sg_len; 3213 } 3214 3215 u64 3216 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) 3217 { 3218 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3219 struct bfa_sge_s *sge; 3220 u64 addr; 3221 3222 sge = drv_fcxp->rsp_sge + sgeid; 3223 addr = (u64)(size_t) sge->sg_addr; 3224 return addr; 3225 } 3226 3227 u32 3228 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) 3229 { 3230 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3231 struct bfa_sge_s *sge; 3232 3233 sge = drv_fcxp->rsp_sge + sgeid; 3234 return sge->sg_len; 3235 } 3236 3237 void 3238 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, 3239 bfa_status_t req_status, u32 rsp_len, u32 resid_len, 3240 struct fchs_s *rsp_fchs) 3241 { 3242 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3243 3244 drv_fcxp->req_status = req_status; 3245 drv_fcxp->rsp_len = rsp_len; 3246 3247 /* bfa_fcxp will be automatically freed by BFA */ 3248 drv_fcxp->bfa_fcxp = NULL; 3249 complete(&drv_fcxp->comp); 3250 } 3251 3252 struct bfad_buf_info * 3253 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, 3254 uint32_t payload_len, uint32_t *num_sgles) 3255 { 3256 struct bfad_buf_info *buf_base, *buf_info; 3257 struct bfa_sge_s *sg_table; 3258 int sge_num = 1; 3259 3260 buf_base = kzalloc((sizeof(struct bfad_buf_info) + 3261 sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL); 3262 if (!buf_base) 3263 return NULL; 3264 3265 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + 3266 (sizeof(struct bfad_buf_info) * sge_num)); 3267 3268 /* Allocate dma coherent memory */ 3269 buf_info = buf_base; 3270 buf_info->size = payload_len; 3271 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size, 3272 &buf_info->phys, GFP_KERNEL); 3273 if (!buf_info->virt) 3274 goto out_free_mem; 3275 3276 /* copy the linear bsg buffer to buf_info */ 3277 memset(buf_info->virt, 0, buf_info->size); 3278 memcpy(buf_info->virt, payload_kbuf, buf_info->size); 3279 3280 /* 3281 * Setup SG table 3282 */ 3283 sg_table->sg_len = buf_info->size; 3284 sg_table->sg_addr = (void *)(size_t) buf_info->phys; 3285 3286 *num_sgles = sge_num; 3287 3288 return buf_base; 3289 3290 out_free_mem: 3291 kfree(buf_base); 3292 return NULL; 3293 } 3294 3295 void 3296 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, 3297 uint32_t num_sgles) 3298 { 3299 int i; 3300 struct bfad_buf_info *buf_info = buf_base; 3301 3302 if (buf_base) { 3303 for (i = 0; i < num_sgles; buf_info++, i++) { 3304 if (buf_info->virt != NULL) 3305 dma_free_coherent(&bfad->pcidev->dev, 3306 buf_info->size, buf_info->virt, 3307 buf_info->phys); 3308 } 3309 kfree(buf_base); 3310 } 3311 } 3312 3313 int 3314 bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, 3315 bfa_bsg_fcpt_t *bsg_fcpt) 3316 { 3317 struct bfa_fcxp_s *hal_fcxp; 3318 struct bfad_s *bfad = drv_fcxp->port->bfad; 3319 unsigned long flags; 3320 uint8_t lp_tag; 3321 3322 spin_lock_irqsave(&bfad->bfad_lock, flags); 3323 3324 /* Allocate bfa_fcxp structure */ 3325 hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa, 3326 drv_fcxp->num_req_sgles, 3327 drv_fcxp->num_rsp_sgles, 3328 bfad_fcxp_get_req_sgaddr_cb, 3329 bfad_fcxp_get_req_sglen_cb, 3330 bfad_fcxp_get_rsp_sgaddr_cb, 3331 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE); 3332 if (!hal_fcxp) { 3333 bfa_trc(bfad, 0); 3334 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3335 return BFA_STATUS_ENOMEM; 3336 } 3337 3338 
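	/*
	 * Hand the allocated fcxp to the driver context, derive the logical
	 * port tag from the source ID in the caller-supplied FC header and
	 * fire the request; bfad_send_fcpt_cb() completes drv_fcxp->comp
	 * when the response (or an error) comes back.
	 */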
drv_fcxp->bfa_fcxp = hal_fcxp; 3339 3340 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); 3341 3342 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag, 3343 bsg_fcpt->cts, bsg_fcpt->cos, 3344 job->request_payload.payload_len, 3345 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, 3346 job->reply_payload.payload_len, bsg_fcpt->tsecs); 3347 3348 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3349 3350 return BFA_STATUS_OK; 3351 } 3352 3353 int 3354 bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) 3355 { 3356 struct bfa_bsg_data *bsg_data; 3357 struct bfad_im_port_s *im_port = 3358 (struct bfad_im_port_s *) job->shost->hostdata[0]; 3359 struct bfad_s *bfad = im_port->bfad; 3360 bfa_bsg_fcpt_t *bsg_fcpt; 3361 struct bfad_fcxp *drv_fcxp; 3362 struct bfa_fcs_lport_s *fcs_port; 3363 struct bfa_fcs_rport_s *fcs_rport; 3364 uint32_t command_type = job->request->msgcode; 3365 unsigned long flags; 3366 struct bfad_buf_info *rsp_buf_info; 3367 void *req_kbuf = NULL, *rsp_kbuf = NULL; 3368 int rc = -EINVAL; 3369 3370 job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */ 3371 job->reply->reply_payload_rcv_len = 0; 3372 3373 /* Get the payload passed in from userspace */ 3374 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) + 3375 sizeof(struct fc_bsg_request)); 3376 if (bsg_data == NULL) 3377 goto out; 3378 3379 /* 3380 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload 3381 * buffer of size bsg_data->payload_len 3382 */ 3383 bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL); 3384 if (!bsg_fcpt) { 3385 rc = -ENOMEM; 3386 goto out; 3387 } 3388 3389 if (copy_from_user((uint8_t *)bsg_fcpt, 3390 (void *)(unsigned long)bsg_data->payload, 3391 bsg_data->payload_len)) { 3392 kfree(bsg_fcpt); 3393 rc = -EIO; 3394 goto out; 3395 } 3396 3397 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL); 3398 if (drv_fcxp == NULL) { 3399 kfree(bsg_fcpt); 3400 rc = -ENOMEM; 3401 goto out; 3402 } 3403 3404 spin_lock_irqsave(&bfad->bfad_lock, flags); 3405 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id, 3406 bsg_fcpt->lpwwn); 3407 if (fcs_port == NULL) { 3408 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN; 3409 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3410 goto out_free_mem; 3411 } 3412 3413 /* Check if the port is online before sending FC Passthru cmd */ 3414 if (!bfa_fcs_lport_is_online(fcs_port)) { 3415 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE; 3416 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3417 goto out_free_mem; 3418 } 3419 3420 drv_fcxp->port = fcs_port->bfad_port; 3421 3422 if (drv_fcxp->port->bfad == 0) 3423 drv_fcxp->port->bfad = bfad; 3424 3425 /* Fetch the bfa_rport - if nexus needed */ 3426 if (command_type == FC_BSG_HST_ELS_NOLOGIN || 3427 command_type == FC_BSG_HST_CT) { 3428 /* BSG HST commands: no nexus needed */ 3429 drv_fcxp->bfa_rport = NULL; 3430 3431 } else if (command_type == FC_BSG_RPT_ELS || 3432 command_type == FC_BSG_RPT_CT) { 3433 /* BSG RPT commands: nexus needed */ 3434 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port, 3435 bsg_fcpt->dpwwn); 3436 if (fcs_rport == NULL) { 3437 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN; 3438 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3439 goto out_free_mem; 3440 } 3441 3442 drv_fcxp->bfa_rport = fcs_rport->bfa_rport; 3443 3444 } else { /* Unknown BSG msgcode; return -EINVAL */ 3445 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3446 goto out_free_mem; 3447 } 3448 3449 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3450 3451 /* allocate 
memory for req / rsp buffers */ 3452 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); 3453 if (!req_kbuf) { 3454 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n", 3455 bfad->pci_name); 3456 rc = -ENOMEM; 3457 goto out_free_mem; 3458 } 3459 3460 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); 3461 if (!rsp_kbuf) { 3462 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n", 3463 bfad->pci_name); 3464 rc = -ENOMEM; 3465 goto out_free_mem; 3466 } 3467 3468 /* map req sg - copy the sg_list passed in to the linear buffer */ 3469 sg_copy_to_buffer(job->request_payload.sg_list, 3470 job->request_payload.sg_cnt, req_kbuf, 3471 job->request_payload.payload_len); 3472 3473 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf, 3474 job->request_payload.payload_len, 3475 &drv_fcxp->num_req_sgles); 3476 if (!drv_fcxp->reqbuf_info) { 3477 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n", 3478 bfad->pci_name); 3479 rc = -ENOMEM; 3480 goto out_free_mem; 3481 } 3482 3483 drv_fcxp->req_sge = (struct bfa_sge_s *) 3484 (((uint8_t *)drv_fcxp->reqbuf_info) + 3485 (sizeof(struct bfad_buf_info) * 3486 drv_fcxp->num_req_sgles)); 3487 3488 /* map rsp sg */ 3489 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf, 3490 job->reply_payload.payload_len, 3491 &drv_fcxp->num_rsp_sgles); 3492 if (!drv_fcxp->rspbuf_info) { 3493 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n", 3494 bfad->pci_name); 3495 rc = -ENOMEM; 3496 goto out_free_mem; 3497 } 3498 3499 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info; 3500 drv_fcxp->rsp_sge = (struct bfa_sge_s *) 3501 (((uint8_t *)drv_fcxp->rspbuf_info) + 3502 (sizeof(struct bfad_buf_info) * 3503 drv_fcxp->num_rsp_sgles)); 3504 3505 /* fcxp send */ 3506 init_completion(&drv_fcxp->comp); 3507 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt); 3508 if (rc == BFA_STATUS_OK) { 3509 wait_for_completion(&drv_fcxp->comp); 3510 bsg_fcpt->status = drv_fcxp->req_status; 3511 } else { 3512 bsg_fcpt->status = rc; 3513 goto out_free_mem; 3514 } 3515 3516 /* fill the job->reply data */ 3517 if (drv_fcxp->req_status == BFA_STATUS_OK) { 3518 job->reply_len = drv_fcxp->rsp_len; 3519 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len; 3520 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 3521 } else { 3522 job->reply->reply_payload_rcv_len = 3523 sizeof(struct fc_bsg_ctels_reply); 3524 job->reply_len = sizeof(uint32_t); 3525 job->reply->reply_data.ctels_reply.status = 3526 FC_CTELS_STATUS_REJECT; 3527 } 3528 3529 /* Copy the response data to the reply_payload sg list */ 3530 sg_copy_from_buffer(job->reply_payload.sg_list, 3531 job->reply_payload.sg_cnt, 3532 (uint8_t *)rsp_buf_info->virt, 3533 job->reply_payload.payload_len); 3534 3535 out_free_mem: 3536 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info, 3537 drv_fcxp->num_rsp_sgles); 3538 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info, 3539 drv_fcxp->num_req_sgles); 3540 kfree(req_kbuf); 3541 kfree(rsp_kbuf); 3542 3543 /* Need a copy to user op */ 3544 if (copy_to_user((void *)(unsigned long)bsg_data->payload, 3545 (void *)bsg_fcpt, bsg_data->payload_len)) 3546 rc = -EIO; 3547 3548 kfree(bsg_fcpt); 3549 kfree(drv_fcxp); 3550 out: 3551 job->reply->result = rc; 3552 3553 if (rc == BFA_STATUS_OK) 3554 job->job_done(job); 3555 3556 return rc; 3557 } 3558 3559 int 3560 bfad_im_bsg_request(struct fc_bsg_job *job) 3561 { 3562 uint32_t rc = BFA_STATUS_OK; 3563 3564 switch (job->request->msgcode) { 3565 case FC_BSG_HST_VENDOR: 3566 /* Process BSG HST 
Vendor requests */ 3567 rc = bfad_im_bsg_vendor_request(job); 3568 break; 3569 case FC_BSG_HST_ELS_NOLOGIN: 3570 case FC_BSG_RPT_ELS: 3571 case FC_BSG_HST_CT: 3572 case FC_BSG_RPT_CT: 3573 /* Process BSG ELS/CT commands */ 3574 rc = bfad_im_bsg_els_ct_request(job); 3575 break; 3576 default: 3577 job->reply->result = rc = -EINVAL; 3578 job->reply->reply_payload_rcv_len = 0; 3579 break; 3580 } 3581 3582 return rc; 3583 } 3584 3585 int 3586 bfad_im_bsg_timeout(struct fc_bsg_job *job) 3587 { 3588 /* Don't complete the BSG job request - return -EAGAIN 3589 * to reset bsg job timeout : for ELS/CT pass thru we 3590 * already have timer to track the request. 3591 */ 3592 return -EAGAIN; 3593 } 3594
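/*
 * Illustration only -- not part of the driver source.  A minimal userspace
 * sketch of how the vendor-command path handled by
 * bfad_im_bsg_vendor_request() above can be exercised: an fc_bsg_request
 * carrying FC_BSG_HST_VENDOR and one IOCMD_* code is issued through the FC
 * transport's bsg node with an sg_io_v4 SG_IO ioctl.  Assumptions: the bsg
 * node is named /dev/bsg/fc_host<host_no> by the FC transport, the queue
 * accepts bidirectional transfers (the same payload buffer, a struct
 * bfa_bsg_*_s from bfad_bsg.h, is written out and read back), and the UAPI
 * headers <linux/bsg.h> and <scsi/scsi_bsg_fc.h> are installed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

static int bfa_issue_iocmd(const char *bsg_dev, uint32_t iocmd_code,
			   void *payload, uint32_t payload_len)
{
	struct {
		struct fc_bsg_request req;
		uint32_t vendor_cmd_room;	/* room behind vendor_cmd[0] */
	} rqst;
	struct sg_io_v4 io;
	int fd, rc;

	memset(&rqst, 0, sizeof(rqst));
	rqst.req.msgcode = FC_BSG_HST_VENDOR;
	/* the driver only inspects vendor_cmd[0], see bfad_im_bsg_vendor_request() */
	rqst.req.rqst_data.h_vendor.vendor_cmd[0] = iocmd_code;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&rqst;
	io.request_len = sizeof(rqst);
	io.dout_xferp = (uintptr_t)payload;	/* command block in ... */
	io.dout_xfer_len = payload_len;
	io.din_xferp = (uintptr_t)payload;	/* ... status/data back out */
	io.din_xfer_len = payload_len;
	io.timeout = 30 * 1000;			/* ms */

	fd = open(bsg_dev, O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &io);
	close(fd);
	return rc;
}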