1 /* 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 3 * All rights reserved 4 * www.brocade.com 5 * 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License (GPL) Version 2 as 10 * published by the Free Software Foundation 11 * 12 * This program is distributed in the hope that it will be useful, but 13 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * General Public License for more details. 16 */ 17 18 #include <linux/uaccess.h> 19 #include "bfad_drv.h" 20 #include "bfad_im.h" 21 #include "bfad_bsg.h" 22 23 BFA_TRC_FILE(LDRV, BSG); 24 25 int 26 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) 27 { 28 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 29 int rc = 0; 30 unsigned long flags; 31 32 spin_lock_irqsave(&bfad->bfad_lock, flags); 33 /* If IOC is not in disabled state - return */ 34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { 35 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 36 iocmd->status = BFA_STATUS_OK; 37 return rc; 38 } 39 40 init_completion(&bfad->enable_comp); 41 bfa_iocfc_enable(&bfad->bfa); 42 iocmd->status = BFA_STATUS_OK; 43 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 44 wait_for_completion(&bfad->enable_comp); 45 46 return rc; 47 } 48 49 int 50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) 51 { 52 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 53 int rc = 0; 54 unsigned long flags; 55 56 spin_lock_irqsave(&bfad->bfad_lock, flags); 57 if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { 58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 59 iocmd->status = BFA_STATUS_OK; 60 return rc; 61 } 62 63 if (bfad->disable_active) { 64 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 65 return -EBUSY; 66 } 67 68 bfad->disable_active = BFA_TRUE; 69 init_completion(&bfad->disable_comp); 70 bfa_iocfc_disable(&bfad->bfa); 71 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 72 73 wait_for_completion(&bfad->disable_comp); 74 bfad->disable_active = BFA_FALSE; 75 iocmd->status = BFA_STATUS_OK; 76 77 return rc; 78 } 79 80 static int 81 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) 82 { 83 int i; 84 struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd; 85 struct bfad_im_port_s *im_port; 86 struct bfa_port_attr_s pattr; 87 unsigned long flags; 88 89 spin_lock_irqsave(&bfad->bfad_lock, flags); 90 bfa_fcport_get_attr(&bfad->bfa, &pattr); 91 iocmd->nwwn = pattr.nwwn; 92 iocmd->pwwn = pattr.pwwn; 93 iocmd->ioc_type = bfa_get_type(&bfad->bfa); 94 iocmd->mac = bfa_get_mac(&bfad->bfa); 95 iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa); 96 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); 97 iocmd->factorynwwn = pattr.factorynwwn; 98 iocmd->factorypwwn = pattr.factorypwwn; 99 iocmd->bfad_num = bfad->inst_no; 100 im_port = bfad->pport.im_port; 101 iocmd->host = im_port->shost->host_no; 102 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 103 104 strcpy(iocmd->name, bfad->adapter_name); 105 strcpy(iocmd->port_name, bfad->port_name); 106 strcpy(iocmd->hwpath, bfad->pci_name); 107 108 /* set adapter hw path */ 109 strcpy(iocmd->adapter_hwpath, bfad->pci_name); 110 for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++) 111 ; 112 for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; ) 113 ; 114 iocmd->adapter_hwpath[i] = '\0'; 115 iocmd->status = BFA_STATUS_OK; 116 return 0; 
117 } 118 119 static int 120 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) 121 { 122 struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd; 123 unsigned long flags; 124 125 spin_lock_irqsave(&bfad->bfad_lock, flags); 126 bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr); 127 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 128 129 /* fill in driver attr info */ 130 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); 131 strncpy(iocmd->ioc_attr.driver_attr.driver_ver, 132 BFAD_DRIVER_VERSION, BFA_VERSION_LEN); 133 strcpy(iocmd->ioc_attr.driver_attr.fw_ver, 134 iocmd->ioc_attr.adapter_attr.fw_ver); 135 strcpy(iocmd->ioc_attr.driver_attr.bios_ver, 136 iocmd->ioc_attr.adapter_attr.optrom_ver); 137 138 /* copy chip rev info first otherwise it will be overwritten */ 139 memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev, 140 sizeof(bfad->pci_attr.chip_rev)); 141 memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr, 142 sizeof(struct bfa_ioc_pci_attr_s)); 143 144 iocmd->status = BFA_STATUS_OK; 145 return 0; 146 } 147 148 int 149 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd) 150 { 151 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd; 152 153 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats); 154 iocmd->status = BFA_STATUS_OK; 155 return 0; 156 } 157 158 int 159 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd, 160 unsigned int payload_len) 161 { 162 struct bfa_bsg_ioc_fwstats_s *iocmd = 163 (struct bfa_bsg_ioc_fwstats_s *)cmd; 164 void *iocmd_bufptr; 165 unsigned long flags; 166 167 if (bfad_chk_iocmd_sz(payload_len, 168 sizeof(struct bfa_bsg_ioc_fwstats_s), 169 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) { 170 iocmd->status = BFA_STATUS_VERSION_FAIL; 171 goto out; 172 } 173 174 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s); 175 spin_lock_irqsave(&bfad->bfad_lock, flags); 176 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr); 177 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 178 179 if (iocmd->status != BFA_STATUS_OK) { 180 bfa_trc(bfad, iocmd->status); 181 goto out; 182 } 183 out: 184 bfa_trc(bfad, 0x6666); 185 return 0; 186 } 187 188 int 189 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 190 { 191 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 192 unsigned long flags; 193 194 if (v_cmd == IOCMD_IOC_RESET_STATS) { 195 bfa_ioc_clear_stats(&bfad->bfa); 196 iocmd->status = BFA_STATUS_OK; 197 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) { 198 spin_lock_irqsave(&bfad->bfad_lock, flags); 199 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc); 200 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 201 } 202 203 return 0; 204 } 205 206 int 207 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 208 { 209 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd; 210 211 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME) 212 strcpy(bfad->adapter_name, iocmd->name); 213 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME) 214 strcpy(bfad->port_name, iocmd->name); 215 216 iocmd->status = BFA_STATUS_OK; 217 return 0; 218 } 219 220 int 221 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) 222 { 223 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; 224 225 iocmd->status = BFA_STATUS_OK; 226 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr); 227 228 return 0; 229 } 230 231 int 232 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd) 233 { 234 struct bfa_bsg_gen_s *iocmd = (struct 
bfa_bsg_gen_s *)cmd; 235 unsigned long flags; 236 237 spin_lock_irqsave(&bfad->bfad_lock, flags); 238 iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc); 239 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 240 return 0; 241 } 242 243 int 244 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) 245 { 246 struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; 247 unsigned long flags; 248 249 spin_lock_irqsave(&bfad->bfad_lock, flags); 250 iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr); 251 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 252 253 return 0; 254 } 255 256 int 257 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd) 258 { 259 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 260 struct bfad_hal_comp fcomp; 261 unsigned long flags; 262 263 init_completion(&fcomp.comp); 264 spin_lock_irqsave(&bfad->bfad_lock, flags); 265 iocmd->status = bfa_port_enable(&bfad->bfa.modules.port, 266 bfad_hcb_comp, &fcomp); 267 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 268 if (iocmd->status != BFA_STATUS_OK) { 269 bfa_trc(bfad, iocmd->status); 270 return 0; 271 } 272 wait_for_completion(&fcomp.comp); 273 iocmd->status = fcomp.status; 274 return 0; 275 } 276 277 int 278 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd) 279 { 280 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 281 struct bfad_hal_comp fcomp; 282 unsigned long flags; 283 284 init_completion(&fcomp.comp); 285 spin_lock_irqsave(&bfad->bfad_lock, flags); 286 iocmd->status = bfa_port_disable(&bfad->bfa.modules.port, 287 bfad_hcb_comp, &fcomp); 288 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 289 290 if (iocmd->status != BFA_STATUS_OK) { 291 bfa_trc(bfad, iocmd->status); 292 return 0; 293 } 294 wait_for_completion(&fcomp.comp); 295 iocmd->status = fcomp.status; 296 return 0; 297 } 298 299 static int 300 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) 301 { 302 struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd; 303 struct bfa_lport_attr_s port_attr; 304 unsigned long flags; 305 306 spin_lock_irqsave(&bfad->bfad_lock, flags); 307 bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr); 308 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); 309 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 310 311 if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE) 312 iocmd->attr.pid = port_attr.pid; 313 else 314 iocmd->attr.pid = 0; 315 316 iocmd->attr.port_type = port_attr.port_type; 317 iocmd->attr.loopback = port_attr.loopback; 318 iocmd->attr.authfail = port_attr.authfail; 319 strncpy(iocmd->attr.port_symname.symname, 320 port_attr.port_cfg.sym_name.symname, 321 sizeof(port_attr.port_cfg.sym_name.symname)); 322 323 iocmd->status = BFA_STATUS_OK; 324 return 0; 325 } 326 327 int 328 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd, 329 unsigned int payload_len) 330 { 331 struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd; 332 struct bfad_hal_comp fcomp; 333 void *iocmd_bufptr; 334 unsigned long flags; 335 336 if (bfad_chk_iocmd_sz(payload_len, 337 sizeof(struct bfa_bsg_port_stats_s), 338 sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) { 339 iocmd->status = BFA_STATUS_VERSION_FAIL; 340 return 0; 341 } 342 343 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s); 344 345 init_completion(&fcomp.comp); 346 spin_lock_irqsave(&bfad->bfad_lock, flags); 347 iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port, 348 iocmd_bufptr, bfad_hcb_comp, &fcomp); 349 spin_unlock_irqrestore(&bfad->bfad_lock, 
flags); 350 if (iocmd->status != BFA_STATUS_OK) { 351 bfa_trc(bfad, iocmd->status); 352 goto out; 353 } 354 355 wait_for_completion(&fcomp.comp); 356 iocmd->status = fcomp.status; 357 out: 358 return 0; 359 } 360 361 int 362 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd) 363 { 364 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 365 struct bfad_hal_comp fcomp; 366 unsigned long flags; 367 368 init_completion(&fcomp.comp); 369 spin_lock_irqsave(&bfad->bfad_lock, flags); 370 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port, 371 bfad_hcb_comp, &fcomp); 372 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 373 if (iocmd->status != BFA_STATUS_OK) { 374 bfa_trc(bfad, iocmd->status); 375 return 0; 376 } 377 wait_for_completion(&fcomp.comp); 378 iocmd->status = fcomp.status; 379 return 0; 380 } 381 382 int 383 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd) 384 { 385 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd; 386 unsigned long flags; 387 388 spin_lock_irqsave(&bfad->bfad_lock, flags); 389 if (v_cmd == IOCMD_PORT_CFG_TOPO) 390 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param); 391 else if (v_cmd == IOCMD_PORT_CFG_SPEED) 392 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param); 393 else if (v_cmd == IOCMD_PORT_CFG_ALPA) 394 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param); 395 else if (v_cmd == IOCMD_PORT_CLR_ALPA) 396 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa); 397 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 398 399 return 0; 400 } 401 402 int 403 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) 404 { 405 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd = 406 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd; 407 unsigned long flags; 408 409 spin_lock_irqsave(&bfad->bfad_lock, flags); 410 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize); 411 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 412 413 return 0; 414 } 415 416 int 417 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 418 { 419 struct bfa_bsg_bbcr_enable_s *iocmd = 420 (struct bfa_bsg_bbcr_enable_s *)pcmd; 421 unsigned long flags; 422 int rc; 423 424 spin_lock_irqsave(&bfad->bfad_lock, flags); 425 if (cmd == IOCMD_PORT_BBCR_ENABLE) 426 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn); 427 else if (cmd == IOCMD_PORT_BBCR_DISABLE) 428 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0); 429 else { 430 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 431 return -EINVAL; 432 } 433 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 434 435 iocmd->status = rc; 436 return 0; 437 } 438 439 int 440 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd) 441 { 442 struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd; 443 unsigned long flags; 444 445 spin_lock_irqsave(&bfad->bfad_lock, flags); 446 iocmd->status = 447 bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr); 448 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 449 450 return 0; 451 } 452 453 454 static int 455 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) 456 { 457 struct bfa_fcs_lport_s *fcs_port; 458 struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd; 459 unsigned long flags; 460 461 spin_lock_irqsave(&bfad->bfad_lock, flags); 462 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 463 iocmd->vf_id, iocmd->pwwn); 464 if (fcs_port == NULL) { 465 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 466 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 467 goto 
out; 468 } 469 470 bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr); 471 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 472 iocmd->status = BFA_STATUS_OK; 473 out: 474 return 0; 475 } 476 477 int 478 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd) 479 { 480 struct bfa_fcs_lport_s *fcs_port; 481 struct bfa_bsg_lport_stats_s *iocmd = 482 (struct bfa_bsg_lport_stats_s *)cmd; 483 unsigned long flags; 484 485 spin_lock_irqsave(&bfad->bfad_lock, flags); 486 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 487 iocmd->vf_id, iocmd->pwwn); 488 if (fcs_port == NULL) { 489 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 490 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 491 goto out; 492 } 493 494 bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats); 495 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 496 iocmd->status = BFA_STATUS_OK; 497 out: 498 return 0; 499 } 500 501 int 502 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd) 503 { 504 struct bfa_fcs_lport_s *fcs_port; 505 struct bfa_bsg_reset_stats_s *iocmd = 506 (struct bfa_bsg_reset_stats_s *)cmd; 507 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); 508 struct list_head *qe, *qen; 509 struct bfa_itnim_s *itnim; 510 unsigned long flags; 511 512 spin_lock_irqsave(&bfad->bfad_lock, flags); 513 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 514 iocmd->vf_id, iocmd->vpwwn); 515 if (fcs_port == NULL) { 516 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 517 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 518 goto out; 519 } 520 521 bfa_fcs_lport_clear_stats(fcs_port); 522 /* clear IO stats from all active itnims */ 523 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 524 itnim = (struct bfa_itnim_s *) qe; 525 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag) 526 continue; 527 bfa_itnim_clear_stats(itnim); 528 } 529 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 530 iocmd->status = BFA_STATUS_OK; 531 out: 532 return 0; 533 } 534 535 int 536 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) 537 { 538 struct bfa_fcs_lport_s *fcs_port; 539 struct bfa_bsg_lport_iostats_s *iocmd = 540 (struct bfa_bsg_lport_iostats_s *)cmd; 541 unsigned long flags; 542 543 spin_lock_irqsave(&bfad->bfad_lock, flags); 544 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 545 iocmd->vf_id, iocmd->pwwn); 546 if (fcs_port == NULL) { 547 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 548 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 549 goto out; 550 } 551 552 bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats, 553 fcs_port->lp_tag); 554 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 555 iocmd->status = BFA_STATUS_OK; 556 out: 557 return 0; 558 } 559 560 int 561 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd, 562 unsigned int payload_len) 563 { 564 struct bfa_bsg_lport_get_rports_s *iocmd = 565 (struct bfa_bsg_lport_get_rports_s *)cmd; 566 struct bfa_fcs_lport_s *fcs_port; 567 unsigned long flags; 568 void *iocmd_bufptr; 569 570 if (iocmd->nrports == 0) 571 return -EINVAL; 572 573 if (bfad_chk_iocmd_sz(payload_len, 574 sizeof(struct bfa_bsg_lport_get_rports_s), 575 sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports) 576 != BFA_STATUS_OK) { 577 iocmd->status = BFA_STATUS_VERSION_FAIL; 578 return 0; 579 } 580 581 iocmd_bufptr = (char *)iocmd + 582 sizeof(struct bfa_bsg_lport_get_rports_s); 583 spin_lock_irqsave(&bfad->bfad_lock, flags); 584 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 585 iocmd->vf_id, iocmd->pwwn); 586 if (fcs_port == NULL) { 587 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 588 bfa_trc(bfad, 0); 589 
iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 590 goto out; 591 } 592 593 bfa_fcs_lport_get_rport_quals(fcs_port, 594 (struct bfa_rport_qualifier_s *)iocmd_bufptr, 595 &iocmd->nrports); 596 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 597 iocmd->status = BFA_STATUS_OK; 598 out: 599 return 0; 600 } 601 602 int 603 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd) 604 { 605 struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd; 606 struct bfa_fcs_lport_s *fcs_port; 607 struct bfa_fcs_rport_s *fcs_rport; 608 unsigned long flags; 609 610 spin_lock_irqsave(&bfad->bfad_lock, flags); 611 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 612 iocmd->vf_id, iocmd->pwwn); 613 if (fcs_port == NULL) { 614 bfa_trc(bfad, 0); 615 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 616 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 617 goto out; 618 } 619 620 if (iocmd->pid) 621 fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port, 622 iocmd->rpwwn, iocmd->pid); 623 else 624 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); 625 if (fcs_rport == NULL) { 626 bfa_trc(bfad, 0); 627 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 628 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 629 goto out; 630 } 631 632 bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr); 633 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 634 iocmd->status = BFA_STATUS_OK; 635 out: 636 return 0; 637 } 638 639 static int 640 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd) 641 { 642 struct bfa_bsg_rport_scsi_addr_s *iocmd = 643 (struct bfa_bsg_rport_scsi_addr_s *)cmd; 644 struct bfa_fcs_lport_s *fcs_port; 645 struct bfa_fcs_itnim_s *fcs_itnim; 646 struct bfad_itnim_s *drv_itnim; 647 unsigned long flags; 648 649 spin_lock_irqsave(&bfad->bfad_lock, flags); 650 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 651 iocmd->vf_id, iocmd->pwwn); 652 if (fcs_port == NULL) { 653 bfa_trc(bfad, 0); 654 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 655 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 656 goto out; 657 } 658 659 fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 660 if (fcs_itnim == NULL) { 661 bfa_trc(bfad, 0); 662 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 663 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 664 goto out; 665 } 666 667 drv_itnim = fcs_itnim->itnim_drv; 668 669 if (drv_itnim && drv_itnim->im_port) 670 iocmd->host = drv_itnim->im_port->shost->host_no; 671 else { 672 bfa_trc(bfad, 0); 673 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 674 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 675 goto out; 676 } 677 678 iocmd->target = drv_itnim->scsi_tgt_id; 679 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 680 681 iocmd->bus = 0; 682 iocmd->lun = 0; 683 iocmd->status = BFA_STATUS_OK; 684 out: 685 return 0; 686 } 687 688 int 689 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd) 690 { 691 struct bfa_bsg_rport_stats_s *iocmd = 692 (struct bfa_bsg_rport_stats_s *)cmd; 693 struct bfa_fcs_lport_s *fcs_port; 694 struct bfa_fcs_rport_s *fcs_rport; 695 unsigned long flags; 696 697 spin_lock_irqsave(&bfad->bfad_lock, flags); 698 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 699 iocmd->vf_id, iocmd->pwwn); 700 if (fcs_port == NULL) { 701 bfa_trc(bfad, 0); 702 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 703 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 704 goto out; 705 } 706 707 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); 708 if (fcs_rport == NULL) { 709 bfa_trc(bfad, 0); 710 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 711 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 712 goto out; 713 } 714 
715 memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats, 716 sizeof(struct bfa_rport_stats_s)); 717 if (bfa_fcs_rport_get_halrport(fcs_rport)) { 718 memcpy((void *)&iocmd->stats.hal_stats, 719 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats), 720 sizeof(struct bfa_rport_hal_stats_s)); 721 } 722 723 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 724 iocmd->status = BFA_STATUS_OK; 725 out: 726 return 0; 727 } 728 729 int 730 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd) 731 { 732 struct bfa_bsg_rport_reset_stats_s *iocmd = 733 (struct bfa_bsg_rport_reset_stats_s *)cmd; 734 struct bfa_fcs_lport_s *fcs_port; 735 struct bfa_fcs_rport_s *fcs_rport; 736 struct bfa_rport_s *rport; 737 unsigned long flags; 738 739 spin_lock_irqsave(&bfad->bfad_lock, flags); 740 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 741 iocmd->vf_id, iocmd->pwwn); 742 if (fcs_port == NULL) { 743 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 744 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 745 goto out; 746 } 747 748 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); 749 if (fcs_rport == NULL) { 750 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 751 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 752 goto out; 753 } 754 755 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s)); 756 rport = bfa_fcs_rport_get_halrport(fcs_rport); 757 if (rport) 758 memset(&rport->stats, 0, sizeof(rport->stats)); 759 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 760 iocmd->status = BFA_STATUS_OK; 761 out: 762 return 0; 763 } 764 765 int 766 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd) 767 { 768 struct bfa_bsg_rport_set_speed_s *iocmd = 769 (struct bfa_bsg_rport_set_speed_s *)cmd; 770 struct bfa_fcs_lport_s *fcs_port; 771 struct bfa_fcs_rport_s *fcs_rport; 772 unsigned long flags; 773 774 spin_lock_irqsave(&bfad->bfad_lock, flags); 775 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 776 iocmd->vf_id, iocmd->pwwn); 777 if (fcs_port == NULL) { 778 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 779 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 780 goto out; 781 } 782 783 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); 784 if (fcs_rport == NULL) { 785 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 786 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 787 goto out; 788 } 789 790 fcs_rport->rpf.assigned_speed = iocmd->speed; 791 /* Set this speed in f/w only if the RPSC speed is not available */ 792 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) 793 if (fcs_rport->bfa_rport) 794 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed); 795 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 796 iocmd->status = BFA_STATUS_OK; 797 out: 798 return 0; 799 } 800 801 int 802 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd) 803 { 804 struct bfa_fcs_vport_s *fcs_vport; 805 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd; 806 unsigned long flags; 807 808 spin_lock_irqsave(&bfad->bfad_lock, flags); 809 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 810 iocmd->vf_id, iocmd->vpwwn); 811 if (fcs_vport == NULL) { 812 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 813 iocmd->status = BFA_STATUS_UNKNOWN_VWWN; 814 goto out; 815 } 816 817 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr); 818 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 819 iocmd->status = BFA_STATUS_OK; 820 out: 821 return 0; 822 } 823 824 int 825 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd) 826 { 827 struct bfa_fcs_vport_s *fcs_vport; 828 struct bfa_bsg_vport_stats_s *iocmd = 829 
(struct bfa_bsg_vport_stats_s *)cmd; 830 unsigned long flags; 831 832 spin_lock_irqsave(&bfad->bfad_lock, flags); 833 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 834 iocmd->vf_id, iocmd->vpwwn); 835 if (fcs_vport == NULL) { 836 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 837 iocmd->status = BFA_STATUS_UNKNOWN_VWWN; 838 goto out; 839 } 840 841 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats, 842 sizeof(struct bfa_vport_stats_s)); 843 memcpy((void *)&iocmd->vport_stats.port_stats, 844 (void *)&fcs_vport->lport.stats, 845 sizeof(struct bfa_lport_stats_s)); 846 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 847 iocmd->status = BFA_STATUS_OK; 848 out: 849 return 0; 850 } 851 852 int 853 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd) 854 { 855 struct bfa_fcs_vport_s *fcs_vport; 856 struct bfa_bsg_reset_stats_s *iocmd = 857 (struct bfa_bsg_reset_stats_s *)cmd; 858 unsigned long flags; 859 860 spin_lock_irqsave(&bfad->bfad_lock, flags); 861 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 862 iocmd->vf_id, iocmd->vpwwn); 863 if (fcs_vport == NULL) { 864 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 865 iocmd->status = BFA_STATUS_UNKNOWN_VWWN; 866 goto out; 867 } 868 869 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 870 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s)); 871 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 872 iocmd->status = BFA_STATUS_OK; 873 out: 874 return 0; 875 } 876 877 static int 878 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, 879 unsigned int payload_len) 880 { 881 struct bfa_bsg_fabric_get_lports_s *iocmd = 882 (struct bfa_bsg_fabric_get_lports_s *)cmd; 883 bfa_fcs_vf_t *fcs_vf; 884 uint32_t nports = iocmd->nports; 885 unsigned long flags; 886 void *iocmd_bufptr; 887 888 if (nports == 0) { 889 iocmd->status = BFA_STATUS_EINVAL; 890 goto out; 891 } 892 893 if (bfad_chk_iocmd_sz(payload_len, 894 sizeof(struct bfa_bsg_fabric_get_lports_s), 895 sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) { 896 iocmd->status = BFA_STATUS_VERSION_FAIL; 897 goto out; 898 } 899 900 iocmd_bufptr = (char *)iocmd + 901 sizeof(struct bfa_bsg_fabric_get_lports_s); 902 903 spin_lock_irqsave(&bfad->bfad_lock, flags); 904 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); 905 if (fcs_vf == NULL) { 906 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 907 iocmd->status = BFA_STATUS_UNKNOWN_VFID; 908 goto out; 909 } 910 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports); 911 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 912 913 iocmd->nports = nports; 914 iocmd->status = BFA_STATUS_OK; 915 out: 916 return 0; 917 } 918 919 int 920 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd) 921 { 922 struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd; 923 unsigned long flags; 924 925 spin_lock_irqsave(&bfad->bfad_lock, flags); 926 iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw); 927 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 928 929 return 0; 930 } 931 932 int 933 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 934 { 935 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 936 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 937 unsigned long flags; 938 939 spin_lock_irqsave(&bfad->bfad_lock, flags); 940 941 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 942 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 943 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 944 else { 945 if (cmd == IOCMD_RATELIM_ENABLE) 946 
fcport->cfg.ratelimit = BFA_TRUE; 947 else if (cmd == IOCMD_RATELIM_DISABLE) 948 fcport->cfg.ratelimit = BFA_FALSE; 949 950 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) 951 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 952 953 iocmd->status = BFA_STATUS_OK; 954 } 955 956 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 957 958 return 0; 959 } 960 961 int 962 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 963 { 964 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd; 965 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 966 unsigned long flags; 967 968 spin_lock_irqsave(&bfad->bfad_lock, flags); 969 970 /* Auto and speeds greater than the supported speed, are invalid */ 971 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) || 972 (iocmd->speed > fcport->speed_sup)) { 973 iocmd->status = BFA_STATUS_UNSUPP_SPEED; 974 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 975 return 0; 976 } 977 978 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 979 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 980 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 981 else { 982 fcport->cfg.trl_def_speed = iocmd->speed; 983 iocmd->status = BFA_STATUS_OK; 984 } 985 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 986 987 return 0; 988 } 989 990 int 991 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd) 992 { 993 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd; 994 unsigned long flags; 995 996 spin_lock_irqsave(&bfad->bfad_lock, flags); 997 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param); 998 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 999 iocmd->status = BFA_STATUS_OK; 1000 return 0; 1001 } 1002 1003 int 1004 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) 1005 { 1006 struct bfa_bsg_fcpim_modstats_s *iocmd = 1007 (struct bfa_bsg_fcpim_modstats_s *)cmd; 1008 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); 1009 struct list_head *qe, *qen; 1010 struct bfa_itnim_s *itnim; 1011 unsigned long flags; 1012 1013 spin_lock_irqsave(&bfad->bfad_lock, flags); 1014 /* accumulate IO stats from itnim */ 1015 memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s)); 1016 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 1017 itnim = (struct bfa_itnim_s *) qe; 1018 bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats)); 1019 } 1020 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1021 iocmd->status = BFA_STATUS_OK; 1022 return 0; 1023 } 1024 1025 int 1026 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd) 1027 { 1028 struct bfa_bsg_fcpim_modstatsclr_s *iocmd = 1029 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd; 1030 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); 1031 struct list_head *qe, *qen; 1032 struct bfa_itnim_s *itnim; 1033 unsigned long flags; 1034 1035 spin_lock_irqsave(&bfad->bfad_lock, flags); 1036 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 1037 itnim = (struct bfa_itnim_s *) qe; 1038 bfa_itnim_clear_stats(itnim); 1039 } 1040 memset(&fcpim->del_itn_stats, 0, 1041 sizeof(struct bfa_fcpim_del_itn_stats_s)); 1042 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1043 iocmd->status = BFA_STATUS_OK; 1044 return 0; 1045 } 1046 1047 int 1048 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) 1049 { 1050 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = 1051 (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd; 1052 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); 1053 unsigned long flags; 1054 1055 spin_lock_irqsave(&bfad->bfad_lock, flags); 1056 memcpy((void *)&iocmd->modstats, (void 
*)&fcpim->del_itn_stats, 1057 sizeof(struct bfa_fcpim_del_itn_stats_s)); 1058 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1059 1060 iocmd->status = BFA_STATUS_OK; 1061 return 0; 1062 } 1063 1064 static int 1065 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd) 1066 { 1067 struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd; 1068 struct bfa_fcs_lport_s *fcs_port; 1069 unsigned long flags; 1070 1071 spin_lock_irqsave(&bfad->bfad_lock, flags); 1072 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 1073 iocmd->vf_id, iocmd->lpwwn); 1074 if (!fcs_port) 1075 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 1076 else 1077 iocmd->status = bfa_fcs_itnim_attr_get(fcs_port, 1078 iocmd->rpwwn, &iocmd->attr); 1079 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1080 return 0; 1081 } 1082 1083 static int 1084 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd) 1085 { 1086 struct bfa_bsg_itnim_iostats_s *iocmd = 1087 (struct bfa_bsg_itnim_iostats_s *)cmd; 1088 struct bfa_fcs_lport_s *fcs_port; 1089 struct bfa_fcs_itnim_s *itnim; 1090 unsigned long flags; 1091 1092 spin_lock_irqsave(&bfad->bfad_lock, flags); 1093 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 1094 iocmd->vf_id, iocmd->lpwwn); 1095 if (!fcs_port) { 1096 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 1097 bfa_trc(bfad, 0); 1098 } else { 1099 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 1100 if (itnim == NULL) 1101 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 1102 else { 1103 iocmd->status = BFA_STATUS_OK; 1104 if (bfa_fcs_itnim_get_halitn(itnim)) 1105 memcpy((void *)&iocmd->iostats, (void *) 1106 &(bfa_fcs_itnim_get_halitn(itnim)->stats), 1107 sizeof(struct bfa_itnim_iostats_s)); 1108 } 1109 } 1110 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1111 return 0; 1112 } 1113 1114 static int 1115 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd) 1116 { 1117 struct bfa_bsg_rport_reset_stats_s *iocmd = 1118 (struct bfa_bsg_rport_reset_stats_s *)cmd; 1119 struct bfa_fcs_lport_s *fcs_port; 1120 struct bfa_fcs_itnim_s *itnim; 1121 unsigned long flags; 1122 1123 spin_lock_irqsave(&bfad->bfad_lock, flags); 1124 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 1125 iocmd->vf_id, iocmd->pwwn); 1126 if (!fcs_port) 1127 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 1128 else { 1129 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 1130 if (itnim == NULL) 1131 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 1132 else { 1133 iocmd->status = BFA_STATUS_OK; 1134 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn); 1135 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim)); 1136 } 1137 } 1138 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1139 1140 return 0; 1141 } 1142 1143 static int 1144 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) 1145 { 1146 struct bfa_bsg_itnim_itnstats_s *iocmd = 1147 (struct bfa_bsg_itnim_itnstats_s *)cmd; 1148 struct bfa_fcs_lport_s *fcs_port; 1149 struct bfa_fcs_itnim_s *itnim; 1150 unsigned long flags; 1151 1152 spin_lock_irqsave(&bfad->bfad_lock, flags); 1153 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 1154 iocmd->vf_id, iocmd->lpwwn); 1155 if (!fcs_port) { 1156 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 1157 bfa_trc(bfad, 0); 1158 } else { 1159 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 1160 if (itnim == NULL) 1161 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 1162 else { 1163 iocmd->status = BFA_STATUS_OK; 1164 bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn, 1165 &iocmd->itnstats); 1166 } 1167 } 1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1169 return 0; 1170 } 
1171 1172 int 1173 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd) 1174 { 1175 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 1176 unsigned long flags; 1177 1178 spin_lock_irqsave(&bfad->bfad_lock, flags); 1179 iocmd->status = bfa_fcport_enable(&bfad->bfa); 1180 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1181 1182 return 0; 1183 } 1184 1185 int 1186 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd) 1187 { 1188 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 1189 unsigned long flags; 1190 1191 spin_lock_irqsave(&bfad->bfad_lock, flags); 1192 iocmd->status = bfa_fcport_disable(&bfad->bfa); 1193 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1194 1195 return 0; 1196 } 1197 1198 int 1199 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd) 1200 { 1201 struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd; 1202 struct bfad_hal_comp fcomp; 1203 unsigned long flags; 1204 1205 init_completion(&fcomp.comp); 1206 spin_lock_irqsave(&bfad->bfad_lock, flags); 1207 iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk, 1208 &iocmd->pcifn_cfg, 1209 bfad_hcb_comp, &fcomp); 1210 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1211 if (iocmd->status != BFA_STATUS_OK) 1212 goto out; 1213 1214 wait_for_completion(&fcomp.comp); 1215 iocmd->status = fcomp.status; 1216 out: 1217 return 0; 1218 } 1219 1220 int 1221 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) 1222 { 1223 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; 1224 struct bfad_hal_comp fcomp; 1225 unsigned long flags; 1226 1227 init_completion(&fcomp.comp); 1228 spin_lock_irqsave(&bfad->bfad_lock, flags); 1229 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, 1230 &iocmd->pcifn_id, iocmd->port, 1231 iocmd->pcifn_class, iocmd->bw_min, 1232 iocmd->bw_max, bfad_hcb_comp, &fcomp); 1233 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1234 if (iocmd->status != BFA_STATUS_OK) 1235 goto out; 1236 1237 wait_for_completion(&fcomp.comp); 1238 iocmd->status = fcomp.status; 1239 out: 1240 return 0; 1241 } 1242 1243 int 1244 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd) 1245 { 1246 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; 1247 struct bfad_hal_comp fcomp; 1248 unsigned long flags; 1249 1250 init_completion(&fcomp.comp); 1251 spin_lock_irqsave(&bfad->bfad_lock, flags); 1252 iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk, 1253 iocmd->pcifn_id, 1254 bfad_hcb_comp, &fcomp); 1255 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1256 if (iocmd->status != BFA_STATUS_OK) 1257 goto out; 1258 1259 wait_for_completion(&fcomp.comp); 1260 iocmd->status = fcomp.status; 1261 out: 1262 return 0; 1263 } 1264 1265 int 1266 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) 1267 { 1268 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; 1269 struct bfad_hal_comp fcomp; 1270 unsigned long flags; 1271 1272 init_completion(&fcomp.comp); 1273 spin_lock_irqsave(&bfad->bfad_lock, flags); 1274 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, 1275 iocmd->pcifn_id, iocmd->bw_min, 1276 iocmd->bw_max, bfad_hcb_comp, &fcomp); 1277 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1278 bfa_trc(bfad, iocmd->status); 1279 if (iocmd->status != BFA_STATUS_OK) 1280 goto out; 1281 1282 wait_for_completion(&fcomp.comp); 1283 iocmd->status = fcomp.status; 1284 bfa_trc(bfad, iocmd->status); 1285 out: 1286 return 0; 1287 } 1288 1289 int 1290 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd) 1291 { 1292 struct 
bfa_bsg_adapter_cfg_mode_s *iocmd = 1293 (struct bfa_bsg_adapter_cfg_mode_s *)cmd; 1294 struct bfad_hal_comp fcomp; 1295 unsigned long flags = 0; 1296 1297 init_completion(&fcomp.comp); 1298 spin_lock_irqsave(&bfad->bfad_lock, flags); 1299 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk, 1300 iocmd->cfg.mode, iocmd->cfg.max_pf, 1301 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); 1302 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1303 if (iocmd->status != BFA_STATUS_OK) 1304 goto out; 1305 1306 wait_for_completion(&fcomp.comp); 1307 iocmd->status = fcomp.status; 1308 out: 1309 return 0; 1310 } 1311 1312 int 1313 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd) 1314 { 1315 struct bfa_bsg_port_cfg_mode_s *iocmd = 1316 (struct bfa_bsg_port_cfg_mode_s *)cmd; 1317 struct bfad_hal_comp fcomp; 1318 unsigned long flags = 0; 1319 1320 init_completion(&fcomp.comp); 1321 spin_lock_irqsave(&bfad->bfad_lock, flags); 1322 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk, 1323 iocmd->instance, iocmd->cfg.mode, 1324 iocmd->cfg.max_pf, iocmd->cfg.max_vf, 1325 bfad_hcb_comp, &fcomp); 1326 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1327 if (iocmd->status != BFA_STATUS_OK) 1328 goto out; 1329 1330 wait_for_completion(&fcomp.comp); 1331 iocmd->status = fcomp.status; 1332 out: 1333 return 0; 1334 } 1335 1336 int 1337 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 1338 { 1339 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 1340 struct bfad_hal_comp fcomp; 1341 unsigned long flags; 1342 1343 init_completion(&fcomp.comp); 1344 spin_lock_irqsave(&bfad->bfad_lock, flags); 1345 if (cmd == IOCMD_FLASH_ENABLE_OPTROM) 1346 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk, 1347 bfad_hcb_comp, &fcomp); 1348 else 1349 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk, 1350 bfad_hcb_comp, &fcomp); 1351 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1352 1353 if (iocmd->status != BFA_STATUS_OK) 1354 goto out; 1355 1356 wait_for_completion(&fcomp.comp); 1357 iocmd->status = fcomp.status; 1358 out: 1359 return 0; 1360 } 1361 1362 int 1363 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) 1364 { 1365 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; 1366 struct bfad_hal_comp fcomp; 1367 unsigned long flags; 1368 1369 init_completion(&fcomp.comp); 1370 iocmd->status = BFA_STATUS_OK; 1371 spin_lock_irqsave(&bfad->bfad_lock, flags); 1372 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr, 1373 bfad_hcb_comp, &fcomp); 1374 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1375 1376 if (iocmd->status != BFA_STATUS_OK) 1377 goto out; 1378 1379 wait_for_completion(&fcomp.comp); 1380 iocmd->status = fcomp.status; 1381 out: 1382 return 0; 1383 } 1384 1385 int 1386 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len) 1387 { 1388 struct bfa_bsg_cee_attr_s *iocmd = 1389 (struct bfa_bsg_cee_attr_s *)cmd; 1390 void *iocmd_bufptr; 1391 struct bfad_hal_comp cee_comp; 1392 unsigned long flags; 1393 1394 if (bfad_chk_iocmd_sz(payload_len, 1395 sizeof(struct bfa_bsg_cee_attr_s), 1396 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) { 1397 iocmd->status = BFA_STATUS_VERSION_FAIL; 1398 return 0; 1399 } 1400 1401 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s); 1402 1403 cee_comp.status = 0; 1404 init_completion(&cee_comp.comp); 1405 mutex_lock(&bfad_mutex); 1406 spin_lock_irqsave(&bfad->bfad_lock, flags); 1407 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr, 
1408 bfad_hcb_comp, &cee_comp); 1409 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1410 if (iocmd->status != BFA_STATUS_OK) { 1411 mutex_unlock(&bfad_mutex); 1412 bfa_trc(bfad, 0x5555); 1413 goto out; 1414 } 1415 wait_for_completion(&cee_comp.comp); 1416 mutex_unlock(&bfad_mutex); 1417 out: 1418 return 0; 1419 } 1420 1421 int 1422 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd, 1423 unsigned int payload_len) 1424 { 1425 struct bfa_bsg_cee_stats_s *iocmd = 1426 (struct bfa_bsg_cee_stats_s *)cmd; 1427 void *iocmd_bufptr; 1428 struct bfad_hal_comp cee_comp; 1429 unsigned long flags; 1430 1431 if (bfad_chk_iocmd_sz(payload_len, 1432 sizeof(struct bfa_bsg_cee_stats_s), 1433 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) { 1434 iocmd->status = BFA_STATUS_VERSION_FAIL; 1435 return 0; 1436 } 1437 1438 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s); 1439 1440 cee_comp.status = 0; 1441 init_completion(&cee_comp.comp); 1442 mutex_lock(&bfad_mutex); 1443 spin_lock_irqsave(&bfad->bfad_lock, flags); 1444 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr, 1445 bfad_hcb_comp, &cee_comp); 1446 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1447 if (iocmd->status != BFA_STATUS_OK) { 1448 mutex_unlock(&bfad_mutex); 1449 bfa_trc(bfad, 0x5555); 1450 goto out; 1451 } 1452 wait_for_completion(&cee_comp.comp); 1453 mutex_unlock(&bfad_mutex); 1454 out: 1455 return 0; 1456 } 1457 1458 int 1459 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd) 1460 { 1461 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 1462 unsigned long flags; 1463 1464 spin_lock_irqsave(&bfad->bfad_lock, flags); 1465 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL); 1466 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1467 if (iocmd->status != BFA_STATUS_OK) 1468 bfa_trc(bfad, 0x5555); 1469 return 0; 1470 } 1471 1472 int 1473 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd) 1474 { 1475 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd; 1476 struct bfad_hal_comp fcomp; 1477 unsigned long flags; 1478 1479 init_completion(&fcomp.comp); 1480 spin_lock_irqsave(&bfad->bfad_lock, flags); 1481 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media, 1482 bfad_hcb_comp, &fcomp); 1483 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1484 bfa_trc(bfad, iocmd->status); 1485 if (iocmd->status != BFA_STATUS_SFP_NOT_READY) 1486 goto out; 1487 1488 wait_for_completion(&fcomp.comp); 1489 iocmd->status = fcomp.status; 1490 out: 1491 return 0; 1492 } 1493 1494 int 1495 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd) 1496 { 1497 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd; 1498 struct bfad_hal_comp fcomp; 1499 unsigned long flags; 1500 1501 init_completion(&fcomp.comp); 1502 spin_lock_irqsave(&bfad->bfad_lock, flags); 1503 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed, 1504 bfad_hcb_comp, &fcomp); 1505 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1506 bfa_trc(bfad, iocmd->status); 1507 if (iocmd->status != BFA_STATUS_SFP_NOT_READY) 1508 goto out; 1509 wait_for_completion(&fcomp.comp); 1510 iocmd->status = fcomp.status; 1511 out: 1512 return 0; 1513 } 1514 1515 int 1516 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd) 1517 { 1518 struct bfa_bsg_flash_attr_s *iocmd = 1519 (struct bfa_bsg_flash_attr_s *)cmd; 1520 struct bfad_hal_comp fcomp; 1521 unsigned long flags; 1522 1523 init_completion(&fcomp.comp); 1524 spin_lock_irqsave(&bfad->bfad_lock, flags); 1525 
iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr, 1526 bfad_hcb_comp, &fcomp); 1527 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1528 if (iocmd->status != BFA_STATUS_OK) 1529 goto out; 1530 wait_for_completion(&fcomp.comp); 1531 iocmd->status = fcomp.status; 1532 out: 1533 return 0; 1534 } 1535 1536 int 1537 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd) 1538 { 1539 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; 1540 struct bfad_hal_comp fcomp; 1541 unsigned long flags; 1542 1543 init_completion(&fcomp.comp); 1544 spin_lock_irqsave(&bfad->bfad_lock, flags); 1545 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type, 1546 iocmd->instance, bfad_hcb_comp, &fcomp); 1547 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1548 if (iocmd->status != BFA_STATUS_OK) 1549 goto out; 1550 wait_for_completion(&fcomp.comp); 1551 iocmd->status = fcomp.status; 1552 out: 1553 return 0; 1554 } 1555 1556 int 1557 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd, 1558 unsigned int payload_len) 1559 { 1560 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; 1561 void *iocmd_bufptr; 1562 struct bfad_hal_comp fcomp; 1563 unsigned long flags; 1564 1565 if (bfad_chk_iocmd_sz(payload_len, 1566 sizeof(struct bfa_bsg_flash_s), 1567 iocmd->bufsz) != BFA_STATUS_OK) { 1568 iocmd->status = BFA_STATUS_VERSION_FAIL; 1569 return 0; 1570 } 1571 1572 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); 1573 1574 init_completion(&fcomp.comp); 1575 spin_lock_irqsave(&bfad->bfad_lock, flags); 1576 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 1577 iocmd->type, iocmd->instance, iocmd_bufptr, 1578 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); 1579 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1580 if (iocmd->status != BFA_STATUS_OK) 1581 goto out; 1582 wait_for_completion(&fcomp.comp); 1583 iocmd->status = fcomp.status; 1584 out: 1585 return 0; 1586 } 1587 1588 int 1589 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd, 1590 unsigned int payload_len) 1591 { 1592 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; 1593 struct bfad_hal_comp fcomp; 1594 void *iocmd_bufptr; 1595 unsigned long flags; 1596 1597 if (bfad_chk_iocmd_sz(payload_len, 1598 sizeof(struct bfa_bsg_flash_s), 1599 iocmd->bufsz) != BFA_STATUS_OK) { 1600 iocmd->status = BFA_STATUS_VERSION_FAIL; 1601 return 0; 1602 } 1603 1604 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); 1605 1606 init_completion(&fcomp.comp); 1607 spin_lock_irqsave(&bfad->bfad_lock, flags); 1608 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type, 1609 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, 1610 bfad_hcb_comp, &fcomp); 1611 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1612 if (iocmd->status != BFA_STATUS_OK) 1613 goto out; 1614 wait_for_completion(&fcomp.comp); 1615 iocmd->status = fcomp.status; 1616 out: 1617 return 0; 1618 } 1619 1620 int 1621 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd) 1622 { 1623 struct bfa_bsg_diag_get_temp_s *iocmd = 1624 (struct bfa_bsg_diag_get_temp_s *)cmd; 1625 struct bfad_hal_comp fcomp; 1626 unsigned long flags; 1627 1628 init_completion(&fcomp.comp); 1629 spin_lock_irqsave(&bfad->bfad_lock, flags); 1630 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa), 1631 &iocmd->result, bfad_hcb_comp, &fcomp); 1632 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1633 bfa_trc(bfad, iocmd->status); 1634 if (iocmd->status != BFA_STATUS_OK) 1635 goto out; 1636 
wait_for_completion(&fcomp.comp); 1637 iocmd->status = fcomp.status; 1638 out: 1639 return 0; 1640 } 1641 1642 int 1643 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd) 1644 { 1645 struct bfa_bsg_diag_memtest_s *iocmd = 1646 (struct bfa_bsg_diag_memtest_s *)cmd; 1647 struct bfad_hal_comp fcomp; 1648 unsigned long flags; 1649 1650 init_completion(&fcomp.comp); 1651 spin_lock_irqsave(&bfad->bfad_lock, flags); 1652 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa), 1653 &iocmd->memtest, iocmd->pat, 1654 &iocmd->result, bfad_hcb_comp, &fcomp); 1655 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1656 bfa_trc(bfad, iocmd->status); 1657 if (iocmd->status != BFA_STATUS_OK) 1658 goto out; 1659 wait_for_completion(&fcomp.comp); 1660 iocmd->status = fcomp.status; 1661 out: 1662 return 0; 1663 } 1664 1665 int 1666 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd) 1667 { 1668 struct bfa_bsg_diag_loopback_s *iocmd = 1669 (struct bfa_bsg_diag_loopback_s *)cmd; 1670 struct bfad_hal_comp fcomp; 1671 unsigned long flags; 1672 1673 init_completion(&fcomp.comp); 1674 spin_lock_irqsave(&bfad->bfad_lock, flags); 1675 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode, 1676 iocmd->speed, iocmd->lpcnt, iocmd->pat, 1677 &iocmd->result, bfad_hcb_comp, &fcomp); 1678 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1679 bfa_trc(bfad, iocmd->status); 1680 if (iocmd->status != BFA_STATUS_OK) 1681 goto out; 1682 wait_for_completion(&fcomp.comp); 1683 iocmd->status = fcomp.status; 1684 out: 1685 return 0; 1686 } 1687 1688 int 1689 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd) 1690 { 1691 struct bfa_bsg_diag_fwping_s *iocmd = 1692 (struct bfa_bsg_diag_fwping_s *)cmd; 1693 struct bfad_hal_comp fcomp; 1694 unsigned long flags; 1695 1696 init_completion(&fcomp.comp); 1697 spin_lock_irqsave(&bfad->bfad_lock, flags); 1698 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt, 1699 iocmd->pattern, &iocmd->result, 1700 bfad_hcb_comp, &fcomp); 1701 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1702 bfa_trc(bfad, iocmd->status); 1703 if (iocmd->status != BFA_STATUS_OK) 1704 goto out; 1705 bfa_trc(bfad, 0x77771); 1706 wait_for_completion(&fcomp.comp); 1707 iocmd->status = fcomp.status; 1708 out: 1709 return 0; 1710 } 1711 1712 int 1713 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd) 1714 { 1715 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd; 1716 struct bfad_hal_comp fcomp; 1717 unsigned long flags; 1718 1719 init_completion(&fcomp.comp); 1720 spin_lock_irqsave(&bfad->bfad_lock, flags); 1721 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force, 1722 iocmd->queue, &iocmd->result, 1723 bfad_hcb_comp, &fcomp); 1724 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1725 if (iocmd->status != BFA_STATUS_OK) 1726 goto out; 1727 wait_for_completion(&fcomp.comp); 1728 iocmd->status = fcomp.status; 1729 out: 1730 return 0; 1731 } 1732 1733 int 1734 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd) 1735 { 1736 struct bfa_bsg_sfp_show_s *iocmd = 1737 (struct bfa_bsg_sfp_show_s *)cmd; 1738 struct bfad_hal_comp fcomp; 1739 unsigned long flags; 1740 1741 init_completion(&fcomp.comp); 1742 spin_lock_irqsave(&bfad->bfad_lock, flags); 1743 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp, 1744 bfad_hcb_comp, &fcomp); 1745 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1746 bfa_trc(bfad, iocmd->status); 1747 if (iocmd->status != BFA_STATUS_OK) 1748 goto out; 1749 wait_for_completion(&fcomp.comp); 1750 iocmd->status = 
fcomp.status; 1751 bfa_trc(bfad, iocmd->status); 1752 out: 1753 return 0; 1754 } 1755 1756 int 1757 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd) 1758 { 1759 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd; 1760 unsigned long flags; 1761 1762 spin_lock_irqsave(&bfad->bfad_lock, flags); 1763 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa), 1764 &iocmd->ledtest); 1765 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1766 return 0; 1767 } 1768 1769 int 1770 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd) 1771 { 1772 struct bfa_bsg_diag_beacon_s *iocmd = 1773 (struct bfa_bsg_diag_beacon_s *)cmd; 1774 unsigned long flags; 1775 1776 spin_lock_irqsave(&bfad->bfad_lock, flags); 1777 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa), 1778 iocmd->beacon, iocmd->link_e2e_beacon, 1779 iocmd->second); 1780 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1781 return 0; 1782 } 1783 1784 int 1785 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) 1786 { 1787 struct bfa_bsg_diag_lb_stat_s *iocmd = 1788 (struct bfa_bsg_diag_lb_stat_s *)cmd; 1789 unsigned long flags; 1790 1791 spin_lock_irqsave(&bfad->bfad_lock, flags); 1792 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa); 1793 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1794 bfa_trc(bfad, iocmd->status); 1795 1796 return 0; 1797 } 1798 1799 int 1800 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd) 1801 { 1802 struct bfa_bsg_dport_enable_s *iocmd = 1803 (struct bfa_bsg_dport_enable_s *)pcmd; 1804 unsigned long flags; 1805 struct bfad_hal_comp fcomp; 1806 1807 init_completion(&fcomp.comp); 1808 spin_lock_irqsave(&bfad->bfad_lock, flags); 1809 iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt, 1810 iocmd->pat, bfad_hcb_comp, &fcomp); 1811 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1812 if (iocmd->status != BFA_STATUS_OK) 1813 bfa_trc(bfad, iocmd->status); 1814 else { 1815 wait_for_completion(&fcomp.comp); 1816 iocmd->status = fcomp.status; 1817 } 1818 return 0; 1819 } 1820 1821 int 1822 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd) 1823 { 1824 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 1825 unsigned long flags; 1826 struct bfad_hal_comp fcomp; 1827 1828 init_completion(&fcomp.comp); 1829 spin_lock_irqsave(&bfad->bfad_lock, flags); 1830 iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); 1831 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1832 if (iocmd->status != BFA_STATUS_OK) 1833 bfa_trc(bfad, iocmd->status); 1834 else { 1835 wait_for_completion(&fcomp.comp); 1836 iocmd->status = fcomp.status; 1837 } 1838 return 0; 1839 } 1840 1841 int 1842 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd) 1843 { 1844 struct bfa_bsg_dport_enable_s *iocmd = 1845 (struct bfa_bsg_dport_enable_s *)pcmd; 1846 unsigned long flags; 1847 struct bfad_hal_comp fcomp; 1848 1849 init_completion(&fcomp.comp); 1850 spin_lock_irqsave(&bfad->bfad_lock, flags); 1851 iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt, 1852 iocmd->pat, bfad_hcb_comp, 1853 &fcomp); 1854 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1855 1856 if (iocmd->status != BFA_STATUS_OK) { 1857 bfa_trc(bfad, iocmd->status); 1858 } else { 1859 wait_for_completion(&fcomp.comp); 1860 iocmd->status = fcomp.status; 1861 } 1862 1863 return 0; 1864 } 1865 1866 int 1867 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd) 1868 { 1869 struct bfa_bsg_diag_dport_show_s *iocmd = 1870 (struct bfa_bsg_diag_dport_show_s *)pcmd; 1871 unsigned long flags; 

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}

int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;

	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
		bfa_trc(bfad, sizeof(struct bfa_plog_s));
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd->status = BFA_STATUS_OK;
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
out:
	return 0;
}

#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U	/* 16K chunks for FW dump */
int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;
	u32 offset;

	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
	    !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
	    !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	offset = iocmd->offset;
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				&offset, &iocmd->bufsz);
	iocmd->offset = offset;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}

int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
			(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
			(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
		       sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
	       sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}

int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
				(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
				iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
						&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

/* Dispatch a vendor-unique BSG command to the matching iocmd handler */
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		   unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_IOC_FW_SIG_INV:
		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	/* TFRU */
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	/* FRU */
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Set the BSG device request_queue size to 256 to support
	 * payloads larger than 512*1024K bytes.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		  bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		  struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			    sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
						  buf_info->size,
						  buf_info->virt,
						  buf_info->phys);
		}
		kfree(buf_base);
	}
}

int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
					  drv_fcxp->num_req_sgles,
					  drv_fcxp->num_rsp_sgles,
					  bfad_fcxp_get_req_sgaddr_cb,
					  bfad_fcxp_get_req_sglen_cb,
					  bfad_fcxp_get_rsp_sgaddr_cb,
					  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			   (void *)(unsigned long)bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (!drv_fcxp->port->bfad)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else {	/* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			    drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			    drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
				FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.
	 */
	return -EAGAIN;
}