// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);
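
/*
 * Note on the bfad_iocmd_*() handlers below: 'cmd' points at the
 * vendor-command payload embedded in the bsg request, the handler does
 * its work while holding bfad_lock, reports the outcome through
 * iocmd->status and returns 0 unless the request could not be issued
 * at all (e.g. -EBUSY, -EINVAL).
 */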
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return 0;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return 0;
}

static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
		;
	for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
		;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}

int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
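
/*
 * Port enable/disable (and many handlers further down) are asynchronous
 * in the HAL: the request is issued under bfad_lock and the handler then
 * sleeps on a bfad_hal_comp completion that bfad_hcb_comp() signals once
 * the firmware response arrives.
 */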
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s	port_attr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strlcpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(iocmd->attr.port_symname.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
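
/*
 * Handlers that return variable-length data validate the user buffer
 * with bfad_chk_iocmd_sz() and then place the data directly after the
 * fixed bfa_bsg_*_s header in the same payload (iocmd_bufptr).
 */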
int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
	else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = rc;
	return 0;
}

int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status =
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
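
/*
 * The logical/remote/virtual port handlers below resolve the FCS object
 * under bfad_lock from the vf_id and WWN supplied by the caller and fail
 * with BFA_STATUS_UNKNOWN_LWWN/_RWWN/_VWWN when no match exists.
 */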
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
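
/*
 * When the caller supplies a PID, the remote port is looked up by the
 * {rpwwn, pid} qualifier; otherwise the lookup is by remote WWN alone.
 */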
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		       sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
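
/*
 * Remote-port statistics live both in the FCS rport and, when one is
 * attached, in the HAL rport; the get/clear stats handlers copy and
 * reset both.
 */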
int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
				(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
				(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
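
/*
 * Virtual (vport) handlers: the vport is resolved with
 * bfa_fcs_vport_lookup(); its statistics include the embedded lport
 * statistics, which are copied/cleared alongside the vport counters.
 */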
int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
				(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
	       (void *)&fcs_vport->lport.stats,
		sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
				(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t	*fcs_vf;
	uint32_t	nports = iocmd->nports;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
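
/*
 * Rate limiting is rejected on loop topologies; when it is enabled
 * without a configured default speed the driver falls back to 1 Gbps.
 */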
int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
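
/*
 * ITN (initiator-target nexus) handlers: attributes and statistics come
 * from the FCS itnim; per-IO counters are taken from the HAL itnim when
 * it exists.
 */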
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
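
/*
 * PCI function (ablk) management: query, create, delete and bandwidth
 * updates are forwarded to the adapter block module and complete
 * asynchronously through bfad_hcb_comp().
 */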
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}
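
/*
 * Adapter- and port-level mode changes (operating mode, max PF/VF
 * counts) likewise go through the adapter block module and are awaited
 * on a completion.
 */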
int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp	fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
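
/*
 * CEE attribute/statistics queries write the reply into the space after
 * the fixed bsg header and additionally serialize the firmware
 * round-trip with bfad_mutex.
 */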
int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
				(struct bfa_bsg_cee_stats_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}

int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
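
/*
 * Flash partition handlers operate on a (type, instance) pair; update
 * and read transfer iocmd->bufsz bytes placed right after the bsg
 * header.
 */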
int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
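
/*
 * Diagnostics: temperature sensor, memory test, loopback, firmware ping,
 * queue test, SFP dump and LED/beacon control. Most of these wait on
 * bfad_hcb_comp() for the result.
 */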
int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
				iocmd->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

	return 0;
}
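
/*
 * D-port (diagnostic port) test control: enable/start run the test with
 * the given count and data pattern, disable tears it down, and
 * dport_show reads back the current result.
 */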
int
bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
				(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}

int
bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}

int
bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
				(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp,
					&fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
	} else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_diag_dport_show_s *iocmd =
				(struct bfa_bsg_diag_dport_show_s *)pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
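
/*
 * PHY handlers: attribute and statistics queries plus read/update of the
 * PHY contents for a given instance, all completion based.
 */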
int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}

int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
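
/*
 * Driver debug facilities: port log retrieval and control, firmware core
 * read-out in 16K chunks, driver trace start/stop and FCP IO profiling
 * control.
 */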
spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1987 if (iocmd->status != BFA_STATUS_OK) 1988 goto out; 1989 wait_for_completion(&fcomp.comp); 1990 iocmd->status = fcomp.status; 1991 out: 1992 return 0; 1993 } 1994 1995 int 1996 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) 1997 { 1998 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; 1999 void *iocmd_bufptr; 2000 2001 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { 2002 bfa_trc(bfad, sizeof(struct bfa_plog_s)); 2003 iocmd->status = BFA_STATUS_EINVAL; 2004 goto out; 2005 } 2006 2007 iocmd->status = BFA_STATUS_OK; 2008 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); 2009 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); 2010 out: 2011 return 0; 2012 } 2013 2014 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ 2015 int 2016 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, 2017 unsigned int payload_len) 2018 { 2019 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; 2020 void *iocmd_bufptr; 2021 unsigned long flags; 2022 u32 offset; 2023 2024 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), 2025 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { 2026 iocmd->status = BFA_STATUS_VERSION_FAIL; 2027 return 0; 2028 } 2029 2030 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || 2031 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || 2032 !IS_ALIGNED(iocmd->offset, sizeof(u32))) { 2033 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); 2034 iocmd->status = BFA_STATUS_EINVAL; 2035 goto out; 2036 } 2037 2038 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); 2039 spin_lock_irqsave(&bfad->bfad_lock, flags); 2040 offset = iocmd->offset; 2041 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, 2042 &offset, &iocmd->bufsz); 2043 iocmd->offset = offset; 2044 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2045 out: 2046 return 0; 2047 } 2048 2049 int 2050 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2051 { 2052 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2053 unsigned long flags; 2054 2055 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { 2056 spin_lock_irqsave(&bfad->bfad_lock, flags); 2057 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; 2058 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2059 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) 2060 bfad->plog_buf.head = bfad->plog_buf.tail = 0; 2061 else if (v_cmd == IOCMD_DEBUG_START_DTRC) 2062 bfa_trc_init(bfad->trcmod); 2063 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) 2064 bfa_trc_stop(bfad->trcmod); 2065 2066 iocmd->status = BFA_STATUS_OK; 2067 return 0; 2068 } 2069 2070 int 2071 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) 2072 { 2073 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; 2074 2075 if (iocmd->ctl == BFA_TRUE) 2076 bfad->plog_buf.plog_enabled = 1; 2077 else 2078 bfad->plog_buf.plog_enabled = 0; 2079 2080 iocmd->status = BFA_STATUS_OK; 2081 return 0; 2082 } 2083 2084 int 2085 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2086 { 2087 struct bfa_bsg_fcpim_profile_s *iocmd = 2088 (struct bfa_bsg_fcpim_profile_s *)cmd; 2089 unsigned long flags; 2090 2091 spin_lock_irqsave(&bfad->bfad_lock, flags); 2092 if (v_cmd == IOCMD_FCPIM_PROFILE_ON) 2093 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds()); 2094 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) 2095 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); 2096 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2097 2098 
return 0; 2099 } 2100 2101 static int 2102 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) 2103 { 2104 struct bfa_bsg_itnim_ioprofile_s *iocmd = 2105 (struct bfa_bsg_itnim_ioprofile_s *)cmd; 2106 struct bfa_fcs_lport_s *fcs_port; 2107 struct bfa_fcs_itnim_s *itnim; 2108 unsigned long flags; 2109 2110 spin_lock_irqsave(&bfad->bfad_lock, flags); 2111 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, 2112 iocmd->vf_id, iocmd->lpwwn); 2113 if (!fcs_port) 2114 iocmd->status = BFA_STATUS_UNKNOWN_LWWN; 2115 else { 2116 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); 2117 if (itnim == NULL) 2118 iocmd->status = BFA_STATUS_UNKNOWN_RWWN; 2119 else 2120 iocmd->status = bfa_itnim_get_ioprofile( 2121 bfa_fcs_itnim_get_halitn(itnim), 2122 &iocmd->ioprofile); 2123 } 2124 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2125 return 0; 2126 } 2127 2128 int 2129 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) 2130 { 2131 struct bfa_bsg_fcport_stats_s *iocmd = 2132 (struct bfa_bsg_fcport_stats_s *)cmd; 2133 struct bfad_hal_comp fcomp; 2134 unsigned long flags; 2135 struct bfa_cb_pending_q_s cb_qe; 2136 2137 init_completion(&fcomp.comp); 2138 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2139 &fcomp, &iocmd->stats); 2140 spin_lock_irqsave(&bfad->bfad_lock, flags); 2141 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2142 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2143 if (iocmd->status != BFA_STATUS_OK) { 2144 bfa_trc(bfad, iocmd->status); 2145 goto out; 2146 } 2147 wait_for_completion(&fcomp.comp); 2148 iocmd->status = fcomp.status; 2149 out: 2150 return 0; 2151 } 2152 2153 int 2154 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) 2155 { 2156 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2157 struct bfad_hal_comp fcomp; 2158 unsigned long flags; 2159 struct bfa_cb_pending_q_s cb_qe; 2160 2161 init_completion(&fcomp.comp); 2162 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); 2163 2164 spin_lock_irqsave(&bfad->bfad_lock, flags); 2165 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2166 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2167 if (iocmd->status != BFA_STATUS_OK) { 2168 bfa_trc(bfad, iocmd->status); 2169 goto out; 2170 } 2171 wait_for_completion(&fcomp.comp); 2172 iocmd->status = fcomp.status; 2173 out: 2174 return 0; 2175 } 2176 2177 int 2178 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) 2179 { 2180 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; 2181 struct bfad_hal_comp fcomp; 2182 unsigned long flags; 2183 2184 init_completion(&fcomp.comp); 2185 spin_lock_irqsave(&bfad->bfad_lock, flags); 2186 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 2187 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, 2188 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2189 bfad_hcb_comp, &fcomp); 2190 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2191 if (iocmd->status != BFA_STATUS_OK) 2192 goto out; 2193 wait_for_completion(&fcomp.comp); 2194 iocmd->status = fcomp.status; 2195 out: 2196 return 0; 2197 } 2198 2199 int 2200 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) 2201 { 2202 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; 2203 struct bfad_hal_comp fcomp; 2204 unsigned long flags; 2205 2206 init_completion(&fcomp.comp); 2207 spin_lock_irqsave(&bfad->bfad_lock, flags); 2208 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), 2209 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, 2210 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2211 
bfad_hcb_comp, &fcomp); 2212 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2213 if (iocmd->status != BFA_STATUS_OK) 2214 goto out; 2215 wait_for_completion(&fcomp.comp); 2216 iocmd->status = fcomp.status; 2217 out: 2218 return 0; 2219 } 2220 2221 int 2222 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd) 2223 { 2224 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd; 2225 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp; 2226 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg; 2227 unsigned long flags; 2228 2229 spin_lock_irqsave(&bfad->bfad_lock, flags); 2230 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; 2231 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; 2232 pbcfg->speed = cfgrsp->pbc_cfg.port_speed; 2233 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); 2234 iocmd->status = BFA_STATUS_OK; 2235 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2236 2237 return 0; 2238 } 2239 2240 int 2241 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd) 2242 { 2243 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; 2244 struct bfad_hal_comp fcomp; 2245 unsigned long flags; 2246 2247 init_completion(&fcomp.comp); 2248 spin_lock_irqsave(&bfad->bfad_lock, flags); 2249 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 2250 BFA_FLASH_PART_PXECFG, 2251 bfad->bfa.ioc.port_id, &iocmd->cfg, 2252 sizeof(struct bfa_ethboot_cfg_s), 0, 2253 bfad_hcb_comp, &fcomp); 2254 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2255 if (iocmd->status != BFA_STATUS_OK) 2256 goto out; 2257 wait_for_completion(&fcomp.comp); 2258 iocmd->status = fcomp.status; 2259 out: 2260 return 0; 2261 } 2262 2263 int 2264 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd) 2265 { 2266 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; 2267 struct bfad_hal_comp fcomp; 2268 unsigned long flags; 2269 2270 init_completion(&fcomp.comp); 2271 spin_lock_irqsave(&bfad->bfad_lock, flags); 2272 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), 2273 BFA_FLASH_PART_PXECFG, 2274 bfad->bfa.ioc.port_id, &iocmd->cfg, 2275 sizeof(struct bfa_ethboot_cfg_s), 0, 2276 bfad_hcb_comp, &fcomp); 2277 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2278 if (iocmd->status != BFA_STATUS_OK) 2279 goto out; 2280 wait_for_completion(&fcomp.comp); 2281 iocmd->status = fcomp.status; 2282 out: 2283 return 0; 2284 } 2285 2286 int 2287 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2288 { 2289 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2290 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2291 struct bfa_fcport_trunk_s *trunk = &fcport->trunk; 2292 unsigned long flags; 2293 2294 spin_lock_irqsave(&bfad->bfad_lock, flags); 2295 2296 if (bfa_fcport_is_dport(&bfad->bfa)) { 2297 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2298 return BFA_STATUS_DPORT_ERR; 2299 } 2300 2301 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || 2302 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2303 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2304 else { 2305 if (v_cmd == IOCMD_TRUNK_ENABLE) { 2306 trunk->attr.state = BFA_TRUNK_OFFLINE; 2307 bfa_fcport_disable(&bfad->bfa); 2308 fcport->cfg.trunked = BFA_TRUE; 2309 } else if (v_cmd == IOCMD_TRUNK_DISABLE) { 2310 trunk->attr.state = BFA_TRUNK_DISABLED; 2311 bfa_fcport_disable(&bfad->bfa); 2312 fcport->cfg.trunked = BFA_FALSE; 2313 } 2314 2315 if (!bfa_fcport_is_disabled(&bfad->bfa)) 2316 bfa_fcport_enable(&bfad->bfa); 2317 2318 iocmd->status = BFA_STATUS_OK; 2319 } 2320 2321 
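	/*
	 * Note: a trunking change is applied by bouncing the FC port above -
	 * bfa_fcport_disable() followed by bfa_fcport_enable() when the port
	 * is not left in the disabled state.
	 */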
spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2322 2323 return 0; 2324 } 2325 2326 int 2327 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) 2328 { 2329 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd; 2330 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2331 struct bfa_fcport_trunk_s *trunk = &fcport->trunk; 2332 unsigned long flags; 2333 2334 spin_lock_irqsave(&bfad->bfad_lock, flags); 2335 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || 2336 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2337 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2338 else { 2339 memcpy((void *)&iocmd->attr, (void *)&trunk->attr, 2340 sizeof(struct bfa_trunk_attr_s)); 2341 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); 2342 iocmd->status = BFA_STATUS_OK; 2343 } 2344 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2345 2346 return 0; 2347 } 2348 2349 int 2350 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2351 { 2352 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2353 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2354 unsigned long flags; 2355 2356 spin_lock_irqsave(&bfad->bfad_lock, flags); 2357 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { 2358 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2359 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2360 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2361 else { 2362 if (v_cmd == IOCMD_QOS_ENABLE) 2363 fcport->cfg.qos_enabled = BFA_TRUE; 2364 else if (v_cmd == IOCMD_QOS_DISABLE) { 2365 fcport->cfg.qos_enabled = BFA_FALSE; 2366 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; 2367 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; 2368 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; 2369 } 2370 } 2371 } 2372 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2373 2374 return 0; 2375 } 2376 2377 int 2378 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) 2379 { 2380 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd; 2381 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2382 unsigned long flags; 2383 2384 spin_lock_irqsave(&bfad->bfad_lock, flags); 2385 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2386 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2387 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2388 else { 2389 iocmd->attr.state = fcport->qos_attr.state; 2390 iocmd->attr.total_bb_cr = 2391 be32_to_cpu(fcport->qos_attr.total_bb_cr); 2392 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; 2393 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; 2394 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; 2395 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; 2396 iocmd->status = BFA_STATUS_OK; 2397 } 2398 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2399 2400 return 0; 2401 } 2402 2403 int 2404 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd) 2405 { 2406 struct bfa_bsg_qos_vc_attr_s *iocmd = 2407 (struct bfa_bsg_qos_vc_attr_s *)cmd; 2408 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2409 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; 2410 unsigned long flags; 2411 u32 i = 0; 2412 2413 spin_lock_irqsave(&bfad->bfad_lock, flags); 2414 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); 2415 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); 2416 iocmd->attr.elp_opmode_flags = 2417 be32_to_cpu(bfa_vc_attr->elp_opmode_flags); 2418 2419 /* Individual VC info */ 2420 while (i < iocmd->attr.total_vc_count) { 2421 iocmd->attr.vc_info[i].vc_credit = 2422 
bfa_vc_attr->vc_info[i].vc_credit; 2423 iocmd->attr.vc_info[i].borrow_credit = 2424 bfa_vc_attr->vc_info[i].borrow_credit; 2425 iocmd->attr.vc_info[i].priority = 2426 bfa_vc_attr->vc_info[i].priority; 2427 i++; 2428 } 2429 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2430 2431 iocmd->status = BFA_STATUS_OK; 2432 return 0; 2433 } 2434 2435 int 2436 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) 2437 { 2438 struct bfa_bsg_fcport_stats_s *iocmd = 2439 (struct bfa_bsg_fcport_stats_s *)cmd; 2440 struct bfad_hal_comp fcomp; 2441 unsigned long flags; 2442 struct bfa_cb_pending_q_s cb_qe; 2443 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2444 2445 init_completion(&fcomp.comp); 2446 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2447 &fcomp, &iocmd->stats); 2448 2449 spin_lock_irqsave(&bfad->bfad_lock, flags); 2450 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2451 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2452 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2453 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2454 else 2455 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2456 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2457 if (iocmd->status != BFA_STATUS_OK) { 2458 bfa_trc(bfad, iocmd->status); 2459 goto out; 2460 } 2461 wait_for_completion(&fcomp.comp); 2462 iocmd->status = fcomp.status; 2463 out: 2464 return 0; 2465 } 2466 2467 int 2468 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) 2469 { 2470 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; 2471 struct bfad_hal_comp fcomp; 2472 unsigned long flags; 2473 struct bfa_cb_pending_q_s cb_qe; 2474 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 2475 2476 init_completion(&fcomp.comp); 2477 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2478 &fcomp, NULL); 2479 2480 spin_lock_irqsave(&bfad->bfad_lock, flags); 2481 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2482 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && 2483 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) 2484 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; 2485 else 2486 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2487 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2488 if (iocmd->status != BFA_STATUS_OK) { 2489 bfa_trc(bfad, iocmd->status); 2490 goto out; 2491 } 2492 wait_for_completion(&fcomp.comp); 2493 iocmd->status = fcomp.status; 2494 out: 2495 return 0; 2496 } 2497 2498 int 2499 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) 2500 { 2501 struct bfa_bsg_vf_stats_s *iocmd = 2502 (struct bfa_bsg_vf_stats_s *)cmd; 2503 struct bfa_fcs_fabric_s *fcs_vf; 2504 unsigned long flags; 2505 2506 spin_lock_irqsave(&bfad->bfad_lock, flags); 2507 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); 2508 if (fcs_vf == NULL) { 2509 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2510 iocmd->status = BFA_STATUS_UNKNOWN_VFID; 2511 goto out; 2512 } 2513 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, 2514 sizeof(struct bfa_vf_stats_s)); 2515 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2516 iocmd->status = BFA_STATUS_OK; 2517 out: 2518 return 0; 2519 } 2520 2521 int 2522 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) 2523 { 2524 struct bfa_bsg_vf_reset_stats_s *iocmd = 2525 (struct bfa_bsg_vf_reset_stats_s *)cmd; 2526 struct bfa_fcs_fabric_s *fcs_vf; 2527 unsigned long flags; 2528 2529 spin_lock_irqsave(&bfad->bfad_lock, flags); 2530 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); 2531 if (fcs_vf == NULL) { 2532 
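		/* No fabric instance for this vf_id: report BFA_STATUS_UNKNOWN_VFID */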
spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2533 iocmd->status = BFA_STATUS_UNKNOWN_VFID; 2534 goto out; 2535 } 2536 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); 2537 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2538 iocmd->status = BFA_STATUS_OK; 2539 out: 2540 return 0; 2541 } 2542 2543 /* Function to reset the LUN SCAN mode */ 2544 static void 2545 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) 2546 { 2547 struct bfad_im_port_s *pport_im = bfad->pport.im_port; 2548 struct bfad_vport_s *vport = NULL; 2549 2550 /* Set the scsi device LUN SCAN flags for base port */ 2551 bfad_reset_sdev_bflags(pport_im, lunmask_cfg); 2552 2553 /* Set the scsi device LUN SCAN flags for the vports */ 2554 list_for_each_entry(vport, &bfad->vport_list, list_entry) 2555 bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); 2556 } 2557 2558 int 2559 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) 2560 { 2561 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 2562 unsigned long flags; 2563 2564 spin_lock_irqsave(&bfad->bfad_lock, flags); 2565 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { 2566 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); 2567 /* Set the LUN Scanning mode to be Sequential scan */ 2568 if (iocmd->status == BFA_STATUS_OK) 2569 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); 2570 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { 2571 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); 2572 /* Set the LUN Scanning mode to default REPORT_LUNS scan */ 2573 if (iocmd->status == BFA_STATUS_OK) 2574 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); 2575 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) 2576 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); 2577 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2578 return 0; 2579 } 2580 2581 int 2582 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) 2583 { 2584 struct bfa_bsg_fcpim_lunmask_query_s *iocmd = 2585 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; 2586 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; 2587 unsigned long flags; 2588 2589 spin_lock_irqsave(&bfad->bfad_lock, flags); 2590 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); 2591 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2592 return 0; 2593 } 2594 2595 int 2596 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) 2597 { 2598 struct bfa_bsg_fcpim_lunmask_s *iocmd = 2599 (struct bfa_bsg_fcpim_lunmask_s *)cmd; 2600 unsigned long flags; 2601 2602 spin_lock_irqsave(&bfad->bfad_lock, flags); 2603 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) 2604 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, 2605 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); 2606 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) 2607 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, 2608 iocmd->vf_id, &iocmd->pwwn, 2609 iocmd->rpwwn, iocmd->lun); 2610 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2611 return 0; 2612 } 2613 2614 int 2615 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) 2616 { 2617 struct bfa_bsg_fcpim_throttle_s *iocmd = 2618 (struct bfa_bsg_fcpim_throttle_s *)cmd; 2619 unsigned long flags; 2620 2621 spin_lock_irqsave(&bfad->bfad_lock, flags); 2622 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, 2623 (void *)&iocmd->throttle); 2624 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2625 2626 return 0; 2627 } 2628 2629 int 2630 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) 
2631 { 2632 struct bfa_bsg_fcpim_throttle_s *iocmd = 2633 (struct bfa_bsg_fcpim_throttle_s *)cmd; 2634 unsigned long flags; 2635 2636 spin_lock_irqsave(&bfad->bfad_lock, flags); 2637 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, 2638 iocmd->throttle.cfg_value); 2639 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2640 2641 return 0; 2642 } 2643 2644 int 2645 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) 2646 { 2647 struct bfa_bsg_tfru_s *iocmd = 2648 (struct bfa_bsg_tfru_s *)cmd; 2649 struct bfad_hal_comp fcomp; 2650 unsigned long flags = 0; 2651 2652 init_completion(&fcomp.comp); 2653 spin_lock_irqsave(&bfad->bfad_lock, flags); 2654 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), 2655 &iocmd->data, iocmd->len, iocmd->offset, 2656 bfad_hcb_comp, &fcomp); 2657 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2658 if (iocmd->status == BFA_STATUS_OK) { 2659 wait_for_completion(&fcomp.comp); 2660 iocmd->status = fcomp.status; 2661 } 2662 2663 return 0; 2664 } 2665 2666 int 2667 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) 2668 { 2669 struct bfa_bsg_tfru_s *iocmd = 2670 (struct bfa_bsg_tfru_s *)cmd; 2671 struct bfad_hal_comp fcomp; 2672 unsigned long flags = 0; 2673 2674 init_completion(&fcomp.comp); 2675 spin_lock_irqsave(&bfad->bfad_lock, flags); 2676 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), 2677 &iocmd->data, iocmd->len, iocmd->offset, 2678 bfad_hcb_comp, &fcomp); 2679 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2680 if (iocmd->status == BFA_STATUS_OK) { 2681 wait_for_completion(&fcomp.comp); 2682 iocmd->status = fcomp.status; 2683 } 2684 2685 return 0; 2686 } 2687 2688 int 2689 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) 2690 { 2691 struct bfa_bsg_fruvpd_s *iocmd = 2692 (struct bfa_bsg_fruvpd_s *)cmd; 2693 struct bfad_hal_comp fcomp; 2694 unsigned long flags = 0; 2695 2696 init_completion(&fcomp.comp); 2697 spin_lock_irqsave(&bfad->bfad_lock, flags); 2698 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), 2699 &iocmd->data, iocmd->len, iocmd->offset, 2700 bfad_hcb_comp, &fcomp); 2701 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2702 if (iocmd->status == BFA_STATUS_OK) { 2703 wait_for_completion(&fcomp.comp); 2704 iocmd->status = fcomp.status; 2705 } 2706 2707 return 0; 2708 } 2709 2710 int 2711 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) 2712 { 2713 struct bfa_bsg_fruvpd_s *iocmd = 2714 (struct bfa_bsg_fruvpd_s *)cmd; 2715 struct bfad_hal_comp fcomp; 2716 unsigned long flags = 0; 2717 2718 init_completion(&fcomp.comp); 2719 spin_lock_irqsave(&bfad->bfad_lock, flags); 2720 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), 2721 &iocmd->data, iocmd->len, iocmd->offset, 2722 bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl); 2723 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2724 if (iocmd->status == BFA_STATUS_OK) { 2725 wait_for_completion(&fcomp.comp); 2726 iocmd->status = fcomp.status; 2727 } 2728 2729 return 0; 2730 } 2731 2732 int 2733 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) 2734 { 2735 struct bfa_bsg_fruvpd_max_size_s *iocmd = 2736 (struct bfa_bsg_fruvpd_max_size_s *)cmd; 2737 unsigned long flags = 0; 2738 2739 spin_lock_irqsave(&bfad->bfad_lock, flags); 2740 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), 2741 &iocmd->max_size); 2742 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2743 2744 return 0; 2745 } 2746 2747 static int 2748 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2749 unsigned int payload_len) 2750 { 2751 int rc = -EINVAL; 2752 2753 switch 
(cmd) { 2754 case IOCMD_IOC_ENABLE: 2755 rc = bfad_iocmd_ioc_enable(bfad, iocmd); 2756 break; 2757 case IOCMD_IOC_DISABLE: 2758 rc = bfad_iocmd_ioc_disable(bfad, iocmd); 2759 break; 2760 case IOCMD_IOC_GET_INFO: 2761 rc = bfad_iocmd_ioc_get_info(bfad, iocmd); 2762 break; 2763 case IOCMD_IOC_GET_ATTR: 2764 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); 2765 break; 2766 case IOCMD_IOC_GET_STATS: 2767 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); 2768 break; 2769 case IOCMD_IOC_GET_FWSTATS: 2770 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); 2771 break; 2772 case IOCMD_IOC_RESET_STATS: 2773 case IOCMD_IOC_RESET_FWSTATS: 2774 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); 2775 break; 2776 case IOCMD_IOC_SET_ADAPTER_NAME: 2777 case IOCMD_IOC_SET_PORT_NAME: 2778 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); 2779 break; 2780 case IOCMD_IOCFC_GET_ATTR: 2781 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); 2782 break; 2783 case IOCMD_IOCFC_SET_INTR: 2784 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); 2785 break; 2786 case IOCMD_PORT_ENABLE: 2787 rc = bfad_iocmd_port_enable(bfad, iocmd); 2788 break; 2789 case IOCMD_PORT_DISABLE: 2790 rc = bfad_iocmd_port_disable(bfad, iocmd); 2791 break; 2792 case IOCMD_PORT_GET_ATTR: 2793 rc = bfad_iocmd_port_get_attr(bfad, iocmd); 2794 break; 2795 case IOCMD_PORT_GET_STATS: 2796 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); 2797 break; 2798 case IOCMD_PORT_RESET_STATS: 2799 rc = bfad_iocmd_port_reset_stats(bfad, iocmd); 2800 break; 2801 case IOCMD_PORT_CFG_TOPO: 2802 case IOCMD_PORT_CFG_SPEED: 2803 case IOCMD_PORT_CFG_ALPA: 2804 case IOCMD_PORT_CLR_ALPA: 2805 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); 2806 break; 2807 case IOCMD_PORT_CFG_MAXFRSZ: 2808 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); 2809 break; 2810 case IOCMD_PORT_BBCR_ENABLE: 2811 case IOCMD_PORT_BBCR_DISABLE: 2812 rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd); 2813 break; 2814 case IOCMD_PORT_BBCR_GET_ATTR: 2815 rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd); 2816 break; 2817 case IOCMD_LPORT_GET_ATTR: 2818 rc = bfad_iocmd_lport_get_attr(bfad, iocmd); 2819 break; 2820 case IOCMD_LPORT_GET_STATS: 2821 rc = bfad_iocmd_lport_get_stats(bfad, iocmd); 2822 break; 2823 case IOCMD_LPORT_RESET_STATS: 2824 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); 2825 break; 2826 case IOCMD_LPORT_GET_IOSTATS: 2827 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); 2828 break; 2829 case IOCMD_LPORT_GET_RPORTS: 2830 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); 2831 break; 2832 case IOCMD_RPORT_GET_ATTR: 2833 rc = bfad_iocmd_rport_get_attr(bfad, iocmd); 2834 break; 2835 case IOCMD_RPORT_GET_ADDR: 2836 rc = bfad_iocmd_rport_get_addr(bfad, iocmd); 2837 break; 2838 case IOCMD_RPORT_GET_STATS: 2839 rc = bfad_iocmd_rport_get_stats(bfad, iocmd); 2840 break; 2841 case IOCMD_RPORT_RESET_STATS: 2842 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd); 2843 break; 2844 case IOCMD_RPORT_SET_SPEED: 2845 rc = bfad_iocmd_rport_set_speed(bfad, iocmd); 2846 break; 2847 case IOCMD_VPORT_GET_ATTR: 2848 rc = bfad_iocmd_vport_get_attr(bfad, iocmd); 2849 break; 2850 case IOCMD_VPORT_GET_STATS: 2851 rc = bfad_iocmd_vport_get_stats(bfad, iocmd); 2852 break; 2853 case IOCMD_VPORT_RESET_STATS: 2854 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); 2855 break; 2856 case IOCMD_FABRIC_GET_LPORTS: 2857 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); 2858 break; 2859 case IOCMD_RATELIM_ENABLE: 2860 case IOCMD_RATELIM_DISABLE: 2861 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); 2862 break; 2863 case 
IOCMD_RATELIM_DEF_SPEED: 2864 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd); 2865 break; 2866 case IOCMD_FCPIM_FAILOVER: 2867 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); 2868 break; 2869 case IOCMD_FCPIM_MODSTATS: 2870 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); 2871 break; 2872 case IOCMD_FCPIM_MODSTATSCLR: 2873 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); 2874 break; 2875 case IOCMD_FCPIM_DEL_ITN_STATS: 2876 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); 2877 break; 2878 case IOCMD_ITNIM_GET_ATTR: 2879 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); 2880 break; 2881 case IOCMD_ITNIM_GET_IOSTATS: 2882 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); 2883 break; 2884 case IOCMD_ITNIM_RESET_STATS: 2885 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); 2886 break; 2887 case IOCMD_ITNIM_GET_ITNSTATS: 2888 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); 2889 break; 2890 case IOCMD_FCPORT_ENABLE: 2891 rc = bfad_iocmd_fcport_enable(bfad, iocmd); 2892 break; 2893 case IOCMD_FCPORT_DISABLE: 2894 rc = bfad_iocmd_fcport_disable(bfad, iocmd); 2895 break; 2896 case IOCMD_IOC_PCIFN_CFG: 2897 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); 2898 break; 2899 case IOCMD_IOC_FW_SIG_INV: 2900 rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); 2901 break; 2902 case IOCMD_PCIFN_CREATE: 2903 rc = bfad_iocmd_pcifn_create(bfad, iocmd); 2904 break; 2905 case IOCMD_PCIFN_DELETE: 2906 rc = bfad_iocmd_pcifn_delete(bfad, iocmd); 2907 break; 2908 case IOCMD_PCIFN_BW: 2909 rc = bfad_iocmd_pcifn_bw(bfad, iocmd); 2910 break; 2911 case IOCMD_ADAPTER_CFG_MODE: 2912 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); 2913 break; 2914 case IOCMD_PORT_CFG_MODE: 2915 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); 2916 break; 2917 case IOCMD_FLASH_ENABLE_OPTROM: 2918 case IOCMD_FLASH_DISABLE_OPTROM: 2919 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); 2920 break; 2921 case IOCMD_FAA_QUERY: 2922 rc = bfad_iocmd_faa_query(bfad, iocmd); 2923 break; 2924 case IOCMD_CEE_GET_ATTR: 2925 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); 2926 break; 2927 case IOCMD_CEE_GET_STATS: 2928 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); 2929 break; 2930 case IOCMD_CEE_RESET_STATS: 2931 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); 2932 break; 2933 case IOCMD_SFP_MEDIA: 2934 rc = bfad_iocmd_sfp_media(bfad, iocmd); 2935 break; 2936 case IOCMD_SFP_SPEED: 2937 rc = bfad_iocmd_sfp_speed(bfad, iocmd); 2938 break; 2939 case IOCMD_FLASH_GET_ATTR: 2940 rc = bfad_iocmd_flash_get_attr(bfad, iocmd); 2941 break; 2942 case IOCMD_FLASH_ERASE_PART: 2943 rc = bfad_iocmd_flash_erase_part(bfad, iocmd); 2944 break; 2945 case IOCMD_FLASH_UPDATE_PART: 2946 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); 2947 break; 2948 case IOCMD_FLASH_READ_PART: 2949 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); 2950 break; 2951 case IOCMD_DIAG_TEMP: 2952 rc = bfad_iocmd_diag_temp(bfad, iocmd); 2953 break; 2954 case IOCMD_DIAG_MEMTEST: 2955 rc = bfad_iocmd_diag_memtest(bfad, iocmd); 2956 break; 2957 case IOCMD_DIAG_LOOPBACK: 2958 rc = bfad_iocmd_diag_loopback(bfad, iocmd); 2959 break; 2960 case IOCMD_DIAG_FWPING: 2961 rc = bfad_iocmd_diag_fwping(bfad, iocmd); 2962 break; 2963 case IOCMD_DIAG_QUEUETEST: 2964 rc = bfad_iocmd_diag_queuetest(bfad, iocmd); 2965 break; 2966 case IOCMD_DIAG_SFP: 2967 rc = bfad_iocmd_diag_sfp(bfad, iocmd); 2968 break; 2969 case IOCMD_DIAG_LED: 2970 rc = bfad_iocmd_diag_led(bfad, iocmd); 2971 break; 2972 case IOCMD_DIAG_BEACON_LPORT: 2973 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); 2974 break; 2975 case 
IOCMD_DIAG_LB_STAT: 2976 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); 2977 break; 2978 case IOCMD_DIAG_DPORT_ENABLE: 2979 rc = bfad_iocmd_diag_dport_enable(bfad, iocmd); 2980 break; 2981 case IOCMD_DIAG_DPORT_DISABLE: 2982 rc = bfad_iocmd_diag_dport_disable(bfad, iocmd); 2983 break; 2984 case IOCMD_DIAG_DPORT_SHOW: 2985 rc = bfad_iocmd_diag_dport_show(bfad, iocmd); 2986 break; 2987 case IOCMD_DIAG_DPORT_START: 2988 rc = bfad_iocmd_diag_dport_start(bfad, iocmd); 2989 break; 2990 case IOCMD_PHY_GET_ATTR: 2991 rc = bfad_iocmd_phy_get_attr(bfad, iocmd); 2992 break; 2993 case IOCMD_PHY_GET_STATS: 2994 rc = bfad_iocmd_phy_get_stats(bfad, iocmd); 2995 break; 2996 case IOCMD_PHY_UPDATE_FW: 2997 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); 2998 break; 2999 case IOCMD_PHY_READ_FW: 3000 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); 3001 break; 3002 case IOCMD_VHBA_QUERY: 3003 rc = bfad_iocmd_vhba_query(bfad, iocmd); 3004 break; 3005 case IOCMD_DEBUG_PORTLOG: 3006 rc = bfad_iocmd_porglog_get(bfad, iocmd); 3007 break; 3008 case IOCMD_DEBUG_FW_CORE: 3009 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); 3010 break; 3011 case IOCMD_DEBUG_FW_STATE_CLR: 3012 case IOCMD_DEBUG_PORTLOG_CLR: 3013 case IOCMD_DEBUG_START_DTRC: 3014 case IOCMD_DEBUG_STOP_DTRC: 3015 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); 3016 break; 3017 case IOCMD_DEBUG_PORTLOG_CTL: 3018 rc = bfad_iocmd_porglog_ctl(bfad, iocmd); 3019 break; 3020 case IOCMD_FCPIM_PROFILE_ON: 3021 case IOCMD_FCPIM_PROFILE_OFF: 3022 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); 3023 break; 3024 case IOCMD_ITNIM_GET_IOPROFILE: 3025 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); 3026 break; 3027 case IOCMD_FCPORT_GET_STATS: 3028 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); 3029 break; 3030 case IOCMD_FCPORT_RESET_STATS: 3031 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); 3032 break; 3033 case IOCMD_BOOT_CFG: 3034 rc = bfad_iocmd_boot_cfg(bfad, iocmd); 3035 break; 3036 case IOCMD_BOOT_QUERY: 3037 rc = bfad_iocmd_boot_query(bfad, iocmd); 3038 break; 3039 case IOCMD_PREBOOT_QUERY: 3040 rc = bfad_iocmd_preboot_query(bfad, iocmd); 3041 break; 3042 case IOCMD_ETHBOOT_CFG: 3043 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); 3044 break; 3045 case IOCMD_ETHBOOT_QUERY: 3046 rc = bfad_iocmd_ethboot_query(bfad, iocmd); 3047 break; 3048 case IOCMD_TRUNK_ENABLE: 3049 case IOCMD_TRUNK_DISABLE: 3050 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); 3051 break; 3052 case IOCMD_TRUNK_GET_ATTR: 3053 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); 3054 break; 3055 case IOCMD_QOS_ENABLE: 3056 case IOCMD_QOS_DISABLE: 3057 rc = bfad_iocmd_qos(bfad, iocmd, cmd); 3058 break; 3059 case IOCMD_QOS_GET_ATTR: 3060 rc = bfad_iocmd_qos_get_attr(bfad, iocmd); 3061 break; 3062 case IOCMD_QOS_GET_VC_ATTR: 3063 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); 3064 break; 3065 case IOCMD_QOS_GET_STATS: 3066 rc = bfad_iocmd_qos_get_stats(bfad, iocmd); 3067 break; 3068 case IOCMD_QOS_RESET_STATS: 3069 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); 3070 break; 3071 case IOCMD_QOS_SET_BW: 3072 rc = bfad_iocmd_qos_set_bw(bfad, iocmd); 3073 break; 3074 case IOCMD_VF_GET_STATS: 3075 rc = bfad_iocmd_vf_get_stats(bfad, iocmd); 3076 break; 3077 case IOCMD_VF_RESET_STATS: 3078 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); 3079 break; 3080 case IOCMD_FCPIM_LUNMASK_ENABLE: 3081 case IOCMD_FCPIM_LUNMASK_DISABLE: 3082 case IOCMD_FCPIM_LUNMASK_CLEAR: 3083 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); 3084 break; 3085 case IOCMD_FCPIM_LUNMASK_QUERY: 3086 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); 
3087 break; 3088 case IOCMD_FCPIM_LUNMASK_ADD: 3089 case IOCMD_FCPIM_LUNMASK_DELETE: 3090 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); 3091 break; 3092 case IOCMD_FCPIM_THROTTLE_QUERY: 3093 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); 3094 break; 3095 case IOCMD_FCPIM_THROTTLE_SET: 3096 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); 3097 break; 3098 /* TFRU */ 3099 case IOCMD_TFRU_READ: 3100 rc = bfad_iocmd_tfru_read(bfad, iocmd); 3101 break; 3102 case IOCMD_TFRU_WRITE: 3103 rc = bfad_iocmd_tfru_write(bfad, iocmd); 3104 break; 3105 /* FRU */ 3106 case IOCMD_FRUVPD_READ: 3107 rc = bfad_iocmd_fruvpd_read(bfad, iocmd); 3108 break; 3109 case IOCMD_FRUVPD_UPDATE: 3110 rc = bfad_iocmd_fruvpd_update(bfad, iocmd); 3111 break; 3112 case IOCMD_FRUVPD_GET_MAX_SIZE: 3113 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); 3114 break; 3115 default: 3116 rc = -EINVAL; 3117 break; 3118 } 3119 return rc; 3120 } 3121 3122 static int 3123 bfad_im_bsg_vendor_request(struct bsg_job *job) 3124 { 3125 struct fc_bsg_request *bsg_request = job->request; 3126 struct fc_bsg_reply *bsg_reply = job->reply; 3127 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 3128 struct Scsi_Host *shost = fc_bsg_to_shost(job); 3129 struct bfad_im_port_s *im_port = bfad_get_im_port(shost); 3130 struct bfad_s *bfad = im_port->bfad; 3131 void *payload_kbuf; 3132 int rc = -EINVAL; 3133 3134 /* Allocate a temp buffer to hold the passed in user space command */ 3135 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); 3136 if (!payload_kbuf) { 3137 rc = -ENOMEM; 3138 goto out; 3139 } 3140 3141 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ 3142 sg_copy_to_buffer(job->request_payload.sg_list, 3143 job->request_payload.sg_cnt, payload_kbuf, 3144 job->request_payload.payload_len); 3145 3146 /* Invoke IOCMD handler - to handle all the vendor command requests */ 3147 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, 3148 job->request_payload.payload_len); 3149 if (rc != BFA_STATUS_OK) 3150 goto error; 3151 3152 /* Copy the response data to the job->reply_payload sg_list */ 3153 sg_copy_from_buffer(job->reply_payload.sg_list, 3154 job->reply_payload.sg_cnt, 3155 payload_kbuf, 3156 job->reply_payload.payload_len); 3157 3158 /* free the command buffer */ 3159 kfree(payload_kbuf); 3160 3161 /* Fill the BSG job reply data */ 3162 job->reply_len = job->reply_payload.payload_len; 3163 bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len; 3164 bsg_reply->result = rc; 3165 3166 bsg_job_done(job, bsg_reply->result, 3167 bsg_reply->reply_payload_rcv_len); 3168 return rc; 3169 error: 3170 /* free the command buffer */ 3171 kfree(payload_kbuf); 3172 out: 3173 bsg_reply->result = rc; 3174 job->reply_len = sizeof(uint32_t); 3175 bsg_reply->reply_payload_rcv_len = 0; 3176 return rc; 3177 } 3178 3179 /* FC passthru call backs */ 3180 u64 3181 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) 3182 { 3183 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3184 struct bfa_sge_s *sge; 3185 u64 addr; 3186 3187 sge = drv_fcxp->req_sge + sgeid; 3188 addr = (u64)(size_t) sge->sg_addr; 3189 return addr; 3190 } 3191 3192 u32 3193 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid) 3194 { 3195 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3196 struct bfa_sge_s *sge; 3197 3198 sge = drv_fcxp->req_sge + sgeid; 3199 return sge->sg_len; 3200 } 3201 3202 u64 3203 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) 3204 { 3205 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3206 struct 
bfa_sge_s *sge; 3207 u64 addr; 3208 3209 sge = drv_fcxp->rsp_sge + sgeid; 3210 addr = (u64)(size_t) sge->sg_addr; 3211 return addr; 3212 } 3213 3214 u32 3215 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) 3216 { 3217 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3218 struct bfa_sge_s *sge; 3219 3220 sge = drv_fcxp->rsp_sge + sgeid; 3221 return sge->sg_len; 3222 } 3223 3224 void 3225 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, 3226 bfa_status_t req_status, u32 rsp_len, u32 resid_len, 3227 struct fchs_s *rsp_fchs) 3228 { 3229 struct bfad_fcxp *drv_fcxp = bfad_fcxp; 3230 3231 drv_fcxp->req_status = req_status; 3232 drv_fcxp->rsp_len = rsp_len; 3233 3234 /* bfa_fcxp will be automatically freed by BFA */ 3235 drv_fcxp->bfa_fcxp = NULL; 3236 complete(&drv_fcxp->comp); 3237 } 3238 3239 struct bfad_buf_info * 3240 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, 3241 uint32_t payload_len, uint32_t *num_sgles) 3242 { 3243 struct bfad_buf_info *buf_base, *buf_info; 3244 struct bfa_sge_s *sg_table; 3245 int sge_num = 1; 3246 3247 buf_base = kcalloc(sizeof(struct bfad_buf_info) + 3248 sizeof(struct bfa_sge_s), 3249 sge_num, GFP_KERNEL); 3250 if (!buf_base) 3251 return NULL; 3252 3253 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + 3254 (sizeof(struct bfad_buf_info) * sge_num)); 3255 3256 /* Allocate dma coherent memory */ 3257 buf_info = buf_base; 3258 buf_info->size = payload_len; 3259 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, 3260 buf_info->size, &buf_info->phys, 3261 GFP_KERNEL); 3262 if (!buf_info->virt) 3263 goto out_free_mem; 3264 3265 /* copy the linear bsg buffer to buf_info */ 3266 memcpy(buf_info->virt, payload_kbuf, buf_info->size); 3267 3268 /* 3269 * Setup SG table 3270 */ 3271 sg_table->sg_len = buf_info->size; 3272 sg_table->sg_addr = (void *)(size_t) buf_info->phys; 3273 3274 *num_sgles = sge_num; 3275 3276 return buf_base; 3277 3278 out_free_mem: 3279 kfree(buf_base); 3280 return NULL; 3281 } 3282 3283 void 3284 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, 3285 uint32_t num_sgles) 3286 { 3287 int i; 3288 struct bfad_buf_info *buf_info = buf_base; 3289 3290 if (buf_base) { 3291 for (i = 0; i < num_sgles; buf_info++, i++) { 3292 if (buf_info->virt != NULL) 3293 dma_free_coherent(&bfad->pcidev->dev, 3294 buf_info->size, buf_info->virt, 3295 buf_info->phys); 3296 } 3297 kfree(buf_base); 3298 } 3299 } 3300 3301 int 3302 bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp, 3303 bfa_bsg_fcpt_t *bsg_fcpt) 3304 { 3305 struct bfa_fcxp_s *hal_fcxp; 3306 struct bfad_s *bfad = drv_fcxp->port->bfad; 3307 unsigned long flags; 3308 uint8_t lp_tag; 3309 3310 spin_lock_irqsave(&bfad->bfad_lock, flags); 3311 3312 /* Allocate bfa_fcxp structure */ 3313 hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa, 3314 drv_fcxp->num_req_sgles, 3315 drv_fcxp->num_rsp_sgles, 3316 bfad_fcxp_get_req_sgaddr_cb, 3317 bfad_fcxp_get_req_sglen_cb, 3318 bfad_fcxp_get_rsp_sgaddr_cb, 3319 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE); 3320 if (!hal_fcxp) { 3321 bfa_trc(bfad, 0); 3322 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 3323 return BFA_STATUS_ENOMEM; 3324 } 3325 3326 drv_fcxp->bfa_fcxp = hal_fcxp; 3327 3328 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); 3329 3330 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag, 3331 bsg_fcpt->cts, bsg_fcpt->cos, 3332 job->request_payload.payload_len, 3333 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, 3334 job->reply_payload.payload_len, 
			bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t command_type = bsg_request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			   (void *)(unsigned long)bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == NULL)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else {	/* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		bsg_reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		bsg_reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	bsg_reply->result = rc;

	if (rc == BFA_STATUS_OK)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}

int
bfad_im_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t rc = BFA_STATUS_OK;

	switch (bsg_request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/*
		 * Process BSG HST Vendor requests: the IOCMD_* code in
		 * vendor_cmd[0] selects a handler in bfad_iocmd_handler().
		 */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		bsg_reply->result = rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}

int
bfad_im_bsg_timeout(struct bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset the bsg job timeout: for ELS/CT pass thru we
	 * already have a timer to track the request.
	 */
	return -EAGAIN;
}
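/*
 * Usage sketch (illustrative, under the usual FC bsg assumptions): a
 * userspace tool drives the vendor path above through the bsg node of the
 * corresponding fc_host (e.g. /dev/bsg/fc_hostN).  It fills a
 * struct fc_bsg_request with msgcode FC_BSG_HST_VENDOR, places the desired
 * IOCMD_* code in rqst_data.h_vendor.vendor_cmd[0], points the bsg write
 * payload at the matching bfa_bsg_*_s structure, and submits the request
 * with the SG_IO ioctl (struct sg_io_v4); the same payload buffer is copied
 * back with iocmd->status filled in by the handlers above.  ELS/CT
 * pass-through (FC_BSG_HST_ELS_NOLOGIN, FC_BSG_RPT_ELS, FC_BSG_HST_CT,
 * FC_BSG_RPT_CT) instead goes through bfad_im_bsg_els_ct_request(), with the
 * frame header and WWNs supplied in a bfa_bsg_fcpt_t referenced from
 * bfa_bsg_data.  Structure layouts come from scsi/scsi_bsg_fc.h and
 * bfad_bsg.h.
 */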