// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
    struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * qla2x00_sp_timeout - timer callback for an SRB's IOCB timer.
 * @t: timer embedded in the SRB's iocb_cmd.
 *
 * Invokes the per-IOCB timeout handler and then drops the timer's
 * reference on the SRB (the "TMR" kref).  After the put, checks the
 * ISP registers and marks the port EEH-busy on a PCI/register
 * disconnect.  Runs in timer (softirq) context — hence the WARN_ON
 * if IRQs are unexpectedly disabled.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;

	WARN_ON(irqs_disabled());
	iocb = &sp->u.iocb_cmd;
	/* Dispatch to the type-specific timeout handler installed at init. */
	iocb->timeout(sp);

	/* ref: TMR */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	/*
	 * NOTE(review): sp may be freed by the kref_put above; vha was
	 * sampled before the put, so only vha is touched from here on.
	 */
	if (vha && qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9008,
		    "PCI/Register disconnect.\n");
		qla_pci_set_eeh_busy(vha);
	}
}
/*
 * qla2x00_sp_free - stop the SRB's IOCB timer and return the SRB to its pool.
 */
void qla2x00_sp_free(srb_t *sp)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Guard callback installed on released SRBs: done() after free is a bug. */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}

/* Guard callback installed on released SRBs: free() after free is a bug. */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * qla2x00_get_async_timeout - compute the timeout (seconds) for async IOCBs.
 * @vha: adapter state pointer.
 *
 * Default is twice the switch-negotiated R_A_TOV (r_a_tov is held in
 * tenths of a second, hence the /10).  QLAFX00 parts use a fixed
 * default; pre-FWI2 ISPs fall back to the login timeout seeded from
 * the initialization control block.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * qla24xx_abort_iocb_timeout - timeout handler for an ABTS (abort) SRB.
 * @data: the abort SRB.
 *
 * Scans the queue pair's outstanding-command table under qp_lock and
 * removes both the original command being aborted (if still present)
 * and the abort SRB itself, then completes both with
 * QLA_OS_TIMER_EXPIRED (continues past this block).
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* handle 0 is reserved; walk every live slot in the request queue. */
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp))
			qpair->req->outstanding_cmds[handle] = NULL;

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (sp->cmd_sp) {
		/*
		 * This done function should take care of
		 * original command ref: INIT
		 */
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
	}

	/* Complete the abort SRB itself as timed out. */
	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
}

/*
 * qla24xx_abort_sp_done - completion callback for an ABTS SRB.
 *
 * Waits for any NVMe reference on the original command to drain, then
 * either wakes the synchronous waiter (SRB_WAKEUP_ON_COMP, which then
 * owns the final kref_put) or drops the INIT reference here.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	srb_t *orig_sp = sp->cmd_sp;

	if (orig_sp)
		qla_wait_nvme_release_cmd_kref(orig_sp);

	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&abt->u.abt.comp);
	else
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * qla24xx_async_abort_cmd - issue an ABTS for an outstanding command.
 * @cmd_sp: the SRB to abort.
 * @wait:   true to block until the abort completes.
 *
 * Allocates an abort SRB on the same queue pair (GFP_ATOMIC: callers
 * may hold locks / be in atomic context), fires it, and — when @wait —
 * sleeps on the completion and maps the firmware completion status to
 * QLA_SUCCESS / QLA_ERR_FROM_FW.
 *
 * Returns: QLA_SUCCESS, QLA_ERR_FROM_FW, QLA_MEMORY_ALLOC_FAILED, or a
 * start_sp failure code.
 */
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* ref: INIT for ABTS command */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return QLA_MEMORY_ALLOC_FAILED;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
	sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
		return rval;
	}

	if (wait) {
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status ==
		    CS_COMPLETE ? QLA_SUCCESS : QLA_ERR_FROM_FW;
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
	}

	return rval;
}

/*
 * qla2x00_async_iocb_timeout - generic timeout handler for async IOCB SRBs.
 * @data: the timed-out SRB.
 *
 * Attempts to abort the stuck IOCB via qla24xx_async_abort_cmd(); when
 * the abort itself cannot be issued, manually removes the SRB from the
 * queue pair's outstanding-command table (under qp_lock) and completes
 * it with QLA_FUNCTION_TIMEOUT.  SRB_LOGIN_CMD additionally records a
 * login error/retry status in the logio data so the login state
 * machine can retry.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Retry as needed. */
			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
			lio->u.logio.data[1] =
			    lio->u.logio.flags & SRB_LOGIN_RETRIED ?
			    QLA_LOGIO_LOGIN_RETRIED : 0;
			/* Abort could not be sent: reap the SRB by hand. */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
					    NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
	default:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Abort could not be sent: reap the SRB by hand. */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
					    NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	}
}

static void
qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	/*
	 * Completion callback for an async PLOGI SRB: packages the logio
	 * mailbox data into an event_arg and feeds the PLOGI-done state
	 * machine, unless the host is unloading.  Drops the INIT kref.
	 */
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		/* Any nonzero driver-level result overrides the mbx status. */
		if (res)
			ea.data[0] = MBS_COMMAND_ERROR;
		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * qla2x00_async_login - issue an asynchronous fabric login (PLOGI/PRLI).
 * @vha:    adapter state pointer.
 * @fcport: remote port to log in to (must already hold a loop_id).
 * @data:   login status words (usage continues past this block).
 *
 * Refuses to send when offline, when a login is already in flight
 * (FCF_ASYNC_SENT), or when no loop_id is assigned.  Flag selection:
 * N2N with the bigger-WWPN peer does PRLI only; eDIF with an active
 * app doorbell does FCSP login and defers PRLI; otherwise a
 * conditional PLOGI.  NVMe targets always skip PRLI here.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so completion can detect RSCN/login races. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_login_sp_done);

	lio = &sp->u.iocb_cmd;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			lio->u.logio.flags |=
			    (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
		} else {
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; 356 } 357 } 358 359 if (NVME_TARGET(vha->hw, fcport)) 360 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; 361 362 rval = qla2x00_start_sp(sp); 363 364 ql_dbg(ql_dbg_disc, vha, 0x2072, 365 "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", 366 fcport->port_name, sp->handle, fcport->loop_id, 367 fcport->d_id.b24, fcport->login_retry, 368 lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : ""); 369 370 if (rval != QLA_SUCCESS) { 371 fcport->flags |= FCF_LOGIN_NEEDED; 372 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 373 goto done_free_sp; 374 } 375 376 return rval; 377 378 done_free_sp: 379 /* ref: INIT */ 380 kref_put(&sp->cmd_kref, qla2x00_sp_release); 381 fcport->flags &= ~FCF_ASYNC_SENT; 382 done: 383 fcport->flags &= ~FCF_ASYNC_ACTIVE; 384 return rval; 385 } 386 387 static void qla2x00_async_logout_sp_done(srb_t *sp, int res) 388 { 389 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 390 sp->fcport->login_gen++; 391 qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); 392 /* ref: INIT */ 393 kref_put(&sp->cmd_kref, qla2x00_sp_release); 394 } 395 396 int 397 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 398 { 399 srb_t *sp; 400 int rval = QLA_FUNCTION_FAILED; 401 402 fcport->flags |= FCF_ASYNC_SENT; 403 /* ref: INIT */ 404 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 405 if (!sp) 406 goto done; 407 408 sp->type = SRB_LOGOUT_CMD; 409 sp->name = "logout"; 410 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 411 qla2x00_async_logout_sp_done), 412 413 ql_dbg(ql_dbg_disc, vha, 0x2070, 414 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", 415 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 416 fcport->d_id.b.area, fcport->d_id.b.al_pa, 417 fcport->port_name, fcport->explicit_logout); 418 419 rval = qla2x00_start_sp(sp); 420 if (rval != QLA_SUCCESS) 421 goto done_free_sp; 422 return rval; 423 424 done_free_sp: 425 /* ref: INIT 
	 */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}

/*
 * qla2x00_async_prlo_done - deferred-work handler after a PRLO completes.
 * @vha:    adapter state pointer.
 * @fcport: remote port the PRLO was sent to.
 * @data:   logio status words; data[0] is passed to the LOGO handler.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}

/*
 * qla2x00_async_prlo_sp_done - completion callback for an async PRLO SRB.
 *
 * Punts the result to the DPC work queue (unless unloading) and drops
 * the INIT kref.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * qla2x00_async_prlo - issue an asynchronous process logout (PRLO).
 * @vha:    adapter state pointer.
 * @fcport: remote port to send the PRLO to.
 *
 * Returns QLA_SUCCESS when the SRB was started; otherwise releases the
 * SRB reference and clears FCF_ASYNC_ACTIVE.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_prlo_sp_done);

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/*
 * qla24xx_handle_adisc_event - react to completion of an ADISC exchange.
 * @vha: adapter state pointer.
 * @ea:  event arguments; ea->data[0] carries the mailbox status.
 *
 * On ADISC failure the session is scheduled for deletion with a forced
 * firmware cleanup; on success (and no generation change races) the
 * port proceeds to GPDB handling.  (Continues past this block.)
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d 
rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Generation checks: bail out if state moved while ADISC was out. */
	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived mid-flight: replay it and recycle the session. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

/*
 * qla_post_els_plogi_work - queue deferred work to send an ELS PLOGI.
 *
 * Marks the port FCF_ASYNC_ACTIVE / DSC_LOGIN_PEND before posting so
 * the discovery state machine sees the login as in progress.
 */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_adisc_sp_done - completion callback for an ADISC SRB.
 *
 * Builds an event_arg from the logio result and hands it to
 * qla24xx_handle_adisc_event().  (Continues past this block.)
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;
	/* A nonzero driver result overrides the mailbox status. */
	if (res)
		ea.data[0] = MBS_COMMAND_ERROR;

	qla24xx_handle_adisc_event(vha, &ea);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * qla2x00_async_adisc - issue an asynchronous ADISC (address discovery).
 * @vha:    adapter state pointer.
 * @fcport: remote port to verify.
 * @data:   logio status words; data[1] may request SRB_LOGIN_RETRIED.
 *
 * Skips ports that are being deleted or already have an async op in
 * flight.  On any failure path the ADISC-done work is still posted so
 * the discovery state machine keeps moving.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC is being delete - not sending command.\n",
		    __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	/* Snapshot generations so completion can detect RSCN/login races. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_adisc_sp_done);

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
		lio = &sp->u.iocb_cmd;
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	}

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Still post the done-work so discovery does not stall. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

/*
 * qla2x00_is_reserved_id - is @loop_id reserved (not usable for a port)?
 *
 * FWI2-capable ISPs reserve everything above NPH_LAST_HANDLE; older
 * ISPs reserve the window above max_loop_id plus the management-server
 * and broadcast IDs.  (Continues past this block.)
 */
static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return loop_id > NPH_LAST_HANDLE;

	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}

/**
 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
 * @vha: adapter state pointer.
 * @dev: port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* loop_id_map is shared across vports; serialize with vport_slock. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else {
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return rval;
}

/*
 * qla2x00_clear_loop_id - release a port's loop ID back to the map.
 *
 * No-op for unassigned or reserved IDs.
 */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

/*
 * qla24xx_handle_gnl_done_event - process a completed Get Name List.
 * @vha: adapter state pointer.
 * @ea:  event arguments; ea->data[0] is the number of bytes returned,
 *       ea->rc the driver-level result for the GNL mailbox command.
 *
 * Matches the firmware's name list (vha->gnl.l) against ea->fcport and
 * drives the per-topology login state machine accordingly.
 * (Continues past this block.)
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport
	= ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* Generation checks: bail out if state moved while GNL was out. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Number of name-list entries actually transferred by firmware. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	/* Find this fcport's WWPN in the firmware's name list. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out the reserve bit of the nport handle */
		loop_id = (loop_id & 0x7fff);
		/* Login-state nibbles: high = NVMe PRLI, low = FCP. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/*
			 * If the firmware reports a different port ID or
			 * loop ID than we have cached, recycle the session.
			 */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			    fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		/* Per-topology reaction to the firmware login state. */
		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* PRLI word 3 BIT_4 distinguishes target from initiator. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled) {
					/* check to see if App support Secure */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				fallthrough;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			/* N2N point-to-point: trust firmware's state and ID. */
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				qla2x00_set_fcport_disc_state(fcport,
				    DSC_DELETED);
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled &&
				    DBELL_ACTIVE(vha)) {
					/* check to see if App support secure or not */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				fallthrough;
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		/* fcport was not present in the firmware's name list. */
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): port_id byte order here
				 * (port_id[0]=domain) is reversed relative to
				 * the matching loop above (port_id[2]=domain)
				 * — verify against the firmware list layout.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					/* Same port ID, different WWPN: stale session. */
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another
				 * fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			/* N2N: peer never showed up; escalate recovery in stages. */
			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		case ISP_CFG_NL:
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		default:
			break;
		}
	}
} /* gnl_event */

/*
 * qla24xx_async_gnl_sp_done - completion callback for the GNL mailbox SRB.
 *
 * Marks every reported loop ID as in use, dispatches a gnl-done event
 * for each fcport queued on vha->gnl.fcports (the list is spliced out
 * under tgt.sess_lock), creates sessions for WWPNs the firmware knows
 * about but the driver does not, and retriggers GNL if more ports were
 * queued while this one was in flight.  (Continues past this block.)
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);


	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* mb[1] = bytes transferred; derive the entry count from it. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	/* Detach the waiting-fcport list so events run without the lock. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock,
		    flags);
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			/* One successful post re-runs GNL for all queued ports. */
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * qla24xx_async_gnl - issue a Get Name List (MBC_PORT_NODE_NAME_LIST).
 * @vha:    adapter state pointer.
 * @fcport: port whose discovery is waiting on the name list.
 *
 * Multiple fcports can share one in-flight GNL: each caller is queued
 * on vha->gnl.fcports under tgt.sess_lock, and only the first caller
 * (vha->gnl.sent transitioning 0 -> 1) actually sends the mailbox
 * command into the preallocated vha->gnl DMA buffer.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	/* Record generations so completion can detect RSCN/login races. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; ride on its completion. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla24xx_async_gnl_sp_done);

	/* Mailbox registers: 64-bit DMA address split across mb[2,3,6,7]. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
	return rval;
}

/*
 * qla24xx_post_gnl_work - queue deferred work to run qla24xx_async_gnl.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

static void
qla24xx_async_gpdb_sp_done(srb_t *sp, int res) 1195 { 1196 struct scsi_qla_host *vha = sp->vha; 1197 struct qla_hw_data *ha = vha->hw; 1198 fc_port_t *fcport = sp->fcport; 1199 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; 1200 struct event_arg ea; 1201 1202 ql_dbg(ql_dbg_disc, vha, 0x20db, 1203 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", 1204 sp->name, res, fcport->port_name, mb[1], mb[2]); 1205 1206 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 1207 1208 if (res == QLA_FUNCTION_TIMEOUT) 1209 goto done; 1210 1211 memset(&ea, 0, sizeof(ea)); 1212 ea.fcport = fcport; 1213 ea.sp = sp; 1214 1215 qla24xx_handle_gpdb_event(vha, &ea); 1216 1217 done: 1218 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, 1219 sp->u.iocb_cmd.u.mbx.in_dma); 1220 1221 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1222 } 1223 1224 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) 1225 { 1226 struct qla_work_evt *e; 1227 1228 if (vha->host->active_mode == MODE_TARGET) 1229 return QLA_FUNCTION_FAILED; 1230 1231 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); 1232 if (!e) 1233 return QLA_FUNCTION_FAILED; 1234 1235 e->u.fcport.fcport = fcport; 1236 1237 return qla2x00_post_work(vha, e); 1238 } 1239 1240 static void qla2x00_async_prli_sp_done(srb_t *sp, int res) 1241 { 1242 struct scsi_qla_host *vha = sp->vha; 1243 struct srb_iocb *lio = &sp->u.iocb_cmd; 1244 struct event_arg ea; 1245 1246 ql_dbg(ql_dbg_disc, vha, 0x2129, 1247 "%s %8phC res %x\n", __func__, 1248 sp->fcport->port_name, res); 1249 1250 sp->fcport->flags &= ~FCF_ASYNC_SENT; 1251 1252 if (!test_bit(UNLOADING, &vha->dpc_flags)) { 1253 memset(&ea, 0, sizeof(ea)); 1254 ea.fcport = sp->fcport; 1255 ea.data[0] = lio->u.logio.data[0]; 1256 ea.data[1] = lio->u.logio.data[1]; 1257 ea.iop[0] = lio->u.logio.iop[0]; 1258 ea.iop[1] = lio->u.logio.iop[1]; 1259 ea.sp = sp; 1260 if (res == QLA_OS_TIMER_EXPIRED) 1261 ea.data[0] = QLA_OS_TIMER_EXPIRED; 1262 else if (res) 1263 ea.data[0] = MBS_COMMAND_ERROR; 1264 
1265 qla24xx_handle_prli_done_event(vha, &ea); 1266 } 1267 1268 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1269 } 1270 1271 int 1272 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) 1273 { 1274 srb_t *sp; 1275 struct srb_iocb *lio; 1276 int rval = QLA_FUNCTION_FAILED; 1277 1278 if (!vha->flags.online) { 1279 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", 1280 __func__, __LINE__, fcport->port_name); 1281 return rval; 1282 } 1283 1284 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || 1285 fcport->fw_login_state == DSC_LS_PRLI_PEND) && 1286 qla_dual_mode_enabled(vha)) { 1287 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", 1288 __func__, __LINE__, fcport->port_name); 1289 return rval; 1290 } 1291 1292 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1293 if (!sp) 1294 return rval; 1295 1296 fcport->flags |= FCF_ASYNC_SENT; 1297 fcport->logout_completed = 0; 1298 1299 sp->type = SRB_PRLI_CMD; 1300 sp->name = "prli"; 1301 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 1302 qla2x00_async_prli_sp_done); 1303 1304 lio = &sp->u.iocb_cmd; 1305 lio->u.logio.flags = 0; 1306 1307 if (NVME_TARGET(vha->hw, fcport)) 1308 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; 1309 1310 ql_dbg(ql_dbg_disc, vha, 0x211b, 1311 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n", 1312 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, 1313 fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority, 1314 NVME_TARGET(vha->hw, fcport) ? 
"nvme" : "fcp"); 1315 1316 rval = qla2x00_start_sp(sp); 1317 if (rval != QLA_SUCCESS) { 1318 fcport->flags |= FCF_LOGIN_NEEDED; 1319 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1320 goto done_free_sp; 1321 } 1322 1323 return rval; 1324 1325 done_free_sp: 1326 /* ref: INIT */ 1327 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1328 fcport->flags &= ~FCF_ASYNC_SENT; 1329 return rval; 1330 } 1331 1332 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 1333 { 1334 struct qla_work_evt *e; 1335 1336 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); 1337 if (!e) 1338 return QLA_FUNCTION_FAILED; 1339 1340 e->u.fcport.fcport = fcport; 1341 e->u.fcport.opt = opt; 1342 fcport->flags |= FCF_ASYNC_ACTIVE; 1343 return qla2x00_post_work(vha, e); 1344 } 1345 1346 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 1347 { 1348 srb_t *sp; 1349 struct srb_iocb *mbx; 1350 int rval = QLA_FUNCTION_FAILED; 1351 u16 *mb; 1352 dma_addr_t pd_dma; 1353 struct port_database_24xx *pd; 1354 struct qla_hw_data *ha = vha->hw; 1355 1356 if (IS_SESSION_DELETED(fcport)) { 1357 ql_log(ql_log_warn, vha, 0xffff, 1358 "%s: %8phC is being delete - not sending command.\n", 1359 __func__, fcport->port_name); 1360 fcport->flags &= ~FCF_ASYNC_ACTIVE; 1361 return rval; 1362 } 1363 1364 if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { 1365 ql_log(ql_log_warn, vha, 0xffff, 1366 "%s: %8phC online %d flags %x - not sending command.\n", 1367 __func__, fcport->port_name, vha->flags.online, fcport->flags); 1368 goto done; 1369 } 1370 1371 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1372 if (!sp) 1373 goto done; 1374 1375 qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); 1376 1377 fcport->flags |= FCF_ASYNC_SENT; 1378 sp->type = SRB_MB_IOCB; 1379 sp->name = "gpdb"; 1380 sp->gen1 = fcport->rscn_gen; 1381 sp->gen2 = fcport->login_gen; 1382 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 1383 qla24xx_async_gpdb_sp_done); 1384 1385 pd = 
dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1386 if (pd == NULL) { 1387 ql_log(ql_log_warn, vha, 0xd043, 1388 "Failed to allocate port database structure.\n"); 1389 goto done_free_sp; 1390 } 1391 1392 mb = sp->u.iocb_cmd.u.mbx.out_mb; 1393 mb[0] = MBC_GET_PORT_DATABASE; 1394 mb[1] = fcport->loop_id; 1395 mb[2] = MSW(pd_dma); 1396 mb[3] = LSW(pd_dma); 1397 mb[6] = MSW(MSD(pd_dma)); 1398 mb[7] = LSW(MSD(pd_dma)); 1399 mb[9] = vha->vp_idx; 1400 mb[10] = opt; 1401 1402 mbx = &sp->u.iocb_cmd; 1403 mbx->u.mbx.in = (void *)pd; 1404 mbx->u.mbx.in_dma = pd_dma; 1405 1406 ql_dbg(ql_dbg_disc, vha, 0x20dc, 1407 "Async-%s %8phC hndl %x opt %x\n", 1408 sp->name, fcport->port_name, sp->handle, opt); 1409 1410 rval = qla2x00_start_sp(sp); 1411 if (rval != QLA_SUCCESS) 1412 goto done_free_sp; 1413 return rval; 1414 1415 done_free_sp: 1416 if (pd) 1417 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1418 1419 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1420 fcport->flags &= ~FCF_ASYNC_SENT; 1421 done: 1422 fcport->flags &= ~FCF_ASYNC_ACTIVE; 1423 qla24xx_post_gpdb_work(vha, fcport, opt); 1424 return rval; 1425 } 1426 1427 static 1428 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) 1429 { 1430 unsigned long flags; 1431 1432 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1433 ea->fcport->login_gen++; 1434 ea->fcport->deleted = 0; 1435 ea->fcport->logout_on_delete = 1; 1436 1437 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { 1438 vha->fcport_count++; 1439 ea->fcport->login_succ = 1; 1440 1441 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1442 qla24xx_sched_upd_fcport(ea->fcport); 1443 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1444 } else if (ea->fcport->login_succ) { 1445 /* 1446 * We have an existing session. A late RSCN delivery 1447 * must have triggered the session to be re-validate. 1448 * Session is still valid. 
1449 */ 1450 ql_dbg(ql_dbg_disc, vha, 0x20d6, 1451 "%s %d %8phC session revalidate success\n", 1452 __func__, __LINE__, ea->fcport->port_name); 1453 qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); 1454 } 1455 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1456 } 1457 1458 static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, 1459 struct port_database_24xx *pd) 1460 { 1461 int rc = 0; 1462 1463 if (pd->secure_login) { 1464 ql_dbg(ql_dbg_disc, vha, 0x104d, 1465 "Secure Login established on %8phC\n", 1466 fcport->port_name); 1467 fcport->flags |= FCF_FCSP_DEVICE; 1468 } else { 1469 ql_dbg(ql_dbg_disc, vha, 0x104d, 1470 "non-Secure Login %8phC", 1471 fcport->port_name); 1472 fcport->flags &= ~FCF_FCSP_DEVICE; 1473 } 1474 if (vha->hw->flags.edif_enabled) { 1475 if (fcport->flags & FCF_FCSP_DEVICE) { 1476 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND); 1477 /* Start edif prli timer & ring doorbell for app */ 1478 fcport->edif.rx_sa_set = 0; 1479 fcport->edif.tx_sa_set = 0; 1480 fcport->edif.rx_sa_pending = 0; 1481 fcport->edif.tx_sa_pending = 0; 1482 1483 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 1484 fcport->d_id.b24); 1485 1486 if (DBELL_ACTIVE(vha)) { 1487 ql_dbg(ql_dbg_disc, vha, 0x20ef, 1488 "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", 1489 __func__, __LINE__, fcport->port_name); 1490 fcport->edif.app_sess_online = 1; 1491 1492 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, 1493 fcport->d_id.b24, 0, fcport); 1494 } 1495 1496 rc = 1; 1497 } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 1498 ql_dbg(ql_dbg_disc, vha, 0x2117, 1499 "%s %d %8phC post prli\n", 1500 __func__, __LINE__, fcport->port_name); 1501 qla24xx_post_prli_work(vha, fcport); 1502 rc = 1; 1503 } 1504 } 1505 return rc; 1506 } 1507 1508 static 1509 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) 1510 { 1511 fc_port_t *fcport = ea->fcport; 1512 struct port_database_24xx *pd; 1513 struct 
srb *sp = ea->sp; 1514 uint8_t ls; 1515 1516 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; 1517 1518 fcport->flags &= ~FCF_ASYNC_SENT; 1519 1520 ql_dbg(ql_dbg_disc, vha, 0x20d2, 1521 "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__, 1522 fcport->port_name, fcport->disc_state, pd->current_login_state, 1523 fcport->fc4_type, ea->rc); 1524 1525 if (fcport->disc_state == DSC_DELETE_PEND) { 1526 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n", 1527 __func__, __LINE__, fcport->port_name); 1528 return; 1529 } 1530 1531 if (NVME_TARGET(vha->hw, fcport)) 1532 ls = pd->current_login_state >> 4; 1533 else 1534 ls = pd->current_login_state & 0xf; 1535 1536 if (ea->sp->gen2 != fcport->login_gen) { 1537 /* target side must have changed it. */ 1538 1539 ql_dbg(ql_dbg_disc, vha, 0x20d3, 1540 "%s %8phC generation changed\n", 1541 __func__, fcport->port_name); 1542 return; 1543 } else if (ea->sp->gen1 != fcport->rscn_gen) { 1544 qla_rscn_replay(fcport); 1545 qlt_schedule_sess_for_deletion(fcport); 1546 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", 1547 __func__, __LINE__, fcport->port_name, ls); 1548 return; 1549 } 1550 1551 switch (ls) { 1552 case PDS_PRLI_COMPLETE: 1553 __qla24xx_parse_gpdb(vha, fcport, pd); 1554 break; 1555 case PDS_PLOGI_COMPLETE: 1556 if (qla_chk_secure_login(vha, fcport, pd)) { 1557 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", 1558 __func__, __LINE__, fcport->port_name, ls); 1559 return; 1560 } 1561 fallthrough; 1562 case PDS_PLOGI_PENDING: 1563 case PDS_PRLI_PENDING: 1564 case PDS_PRLI2_PENDING: 1565 /* Set discovery state back to GNL to Relogin attempt */ 1566 if (qla_dual_mode_enabled(vha) || 1567 qla_ini_mode_enabled(vha)) { 1568 qla2x00_set_fcport_disc_state(fcport, DSC_GNL); 1569 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1570 } 1571 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", 1572 __func__, __LINE__, fcport->port_name, ls); 1573 return; 1574 case PDS_LOGO_PENDING: 1575 case PDS_PORT_UNAVAILABLE: 1576 
default: 1577 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n", 1578 __func__, __LINE__, fcport->port_name); 1579 qlt_schedule_sess_for_deletion(fcport); 1580 return; 1581 } 1582 __qla24xx_handle_gpdb_event(vha, ea); 1583 } /* gpdb event */ 1584 1585 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) 1586 { 1587 u8 login = 0; 1588 int rc; 1589 1590 ql_dbg(ql_dbg_disc, vha, 0x307b, 1591 "%s %8phC DS %d LS %d lid %d retries=%d\n", 1592 __func__, fcport->port_name, fcport->disc_state, 1593 fcport->fw_login_state, fcport->loop_id, fcport->login_retry); 1594 1595 if (qla_tgt_mode_enabled(vha)) 1596 return; 1597 1598 if (qla_dual_mode_enabled(vha)) { 1599 if (N2N_TOPO(vha->hw)) { 1600 u64 mywwn, wwn; 1601 1602 mywwn = wwn_to_u64(vha->port_name); 1603 wwn = wwn_to_u64(fcport->port_name); 1604 if (mywwn > wwn) 1605 login = 1; 1606 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP) 1607 && time_after_eq(jiffies, 1608 fcport->plogi_nack_done_deadline)) 1609 login = 1; 1610 } else { 1611 login = 1; 1612 } 1613 } else { 1614 /* initiator mode */ 1615 login = 1; 1616 } 1617 1618 if (login && fcport->login_retry) { 1619 fcport->login_retry--; 1620 if (fcport->loop_id == FC_NO_LOOP_ID) { 1621 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 1622 rc = qla2x00_find_new_loop_id(vha, fcport); 1623 if (rc) { 1624 ql_dbg(ql_dbg_disc, vha, 0x20e6, 1625 "%s %d %8phC post del sess - out of loopid\n", 1626 __func__, __LINE__, fcport->port_name); 1627 fcport->scan_state = 0; 1628 qlt_schedule_sess_for_deletion(fcport); 1629 return; 1630 } 1631 } 1632 ql_dbg(ql_dbg_disc, vha, 0x20bf, 1633 "%s %d %8phC post login\n", 1634 __func__, __LINE__, fcport->port_name); 1635 qla2x00_post_async_login_work(vha, fcport, NULL); 1636 } 1637 } 1638 1639 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) 1640 { 1641 u16 data[2]; 1642 u64 wwn; 1643 u16 sec; 1644 1645 ql_dbg(ql_dbg_disc, vha, 0x20d8, 1646 "%s %8phC DS %d LS %d P %d fl %x 
confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n", 1647 __func__, fcport->port_name, fcport->disc_state, 1648 fcport->fw_login_state, fcport->login_pause, fcport->flags, 1649 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, 1650 fcport->login_gen, fcport->loop_id, fcport->scan_state, 1651 fcport->fc4_type); 1652 1653 if (fcport->scan_state != QLA_FCPORT_FOUND || 1654 fcport->disc_state == DSC_DELETE_PEND) 1655 return 0; 1656 1657 if ((fcport->loop_id != FC_NO_LOOP_ID) && 1658 qla_dual_mode_enabled(vha) && 1659 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 1660 (fcport->fw_login_state == DSC_LS_PRLI_PEND))) 1661 return 0; 1662 1663 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && 1664 !N2N_TOPO(vha->hw)) { 1665 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { 1666 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1667 return 0; 1668 } 1669 } 1670 1671 /* Target won't initiate port login if fabric is present */ 1672 if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) 1673 return 0; 1674 1675 if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { 1676 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1677 return 0; 1678 } 1679 1680 switch (fcport->disc_state) { 1681 case DSC_DELETED: 1682 wwn = wwn_to_u64(fcport->node_name); 1683 switch (vha->hw->current_topology) { 1684 case ISP_CFG_N: 1685 if (fcport_is_smaller(fcport)) { 1686 /* this adapter is bigger */ 1687 if (fcport->login_retry) { 1688 if (fcport->loop_id == FC_NO_LOOP_ID) { 1689 qla2x00_find_new_loop_id(vha, 1690 fcport); 1691 fcport->fw_login_state = 1692 DSC_LS_PORT_UNAVAIL; 1693 } 1694 fcport->login_retry--; 1695 qla_post_els_plogi_work(vha, fcport); 1696 } else { 1697 ql_log(ql_log_info, vha, 0x705d, 1698 "Unable to reach remote port %8phC", 1699 fcport->port_name); 1700 } 1701 } else { 1702 qla24xx_post_gnl_work(vha, fcport); 1703 } 1704 break; 1705 default: 1706 if (wwn == 0) { 1707 ql_dbg(ql_dbg_disc, vha, 0xffff, 1708 "%s %d %8phC post GNNID\n", 1709 __func__, 
__LINE__, fcport->port_name); 1710 qla24xx_post_gnnid_work(vha, fcport); 1711 } else if (fcport->loop_id == FC_NO_LOOP_ID) { 1712 ql_dbg(ql_dbg_disc, vha, 0x20bd, 1713 "%s %d %8phC post gnl\n", 1714 __func__, __LINE__, fcport->port_name); 1715 qla24xx_post_gnl_work(vha, fcport); 1716 } else { 1717 qla_chk_n2n_b4_login(vha, fcport); 1718 } 1719 break; 1720 } 1721 break; 1722 1723 case DSC_GNL: 1724 switch (vha->hw->current_topology) { 1725 case ISP_CFG_N: 1726 if ((fcport->current_login_state & 0xf) == 0x6) { 1727 ql_dbg(ql_dbg_disc, vha, 0x2118, 1728 "%s %d %8phC post GPDB work\n", 1729 __func__, __LINE__, fcport->port_name); 1730 fcport->chip_reset = 1731 vha->hw->base_qpair->chip_reset; 1732 qla24xx_post_gpdb_work(vha, fcport, 0); 1733 } else { 1734 ql_dbg(ql_dbg_disc, vha, 0x2118, 1735 "%s %d %8phC post %s PRLI\n", 1736 __func__, __LINE__, fcport->port_name, 1737 NVME_TARGET(vha->hw, fcport) ? "NVME" : 1738 "FC"); 1739 qla24xx_post_prli_work(vha, fcport); 1740 } 1741 break; 1742 default: 1743 if (fcport->login_pause) { 1744 ql_dbg(ql_dbg_disc, vha, 0x20d8, 1745 "%s %d %8phC exit\n", 1746 __func__, __LINE__, 1747 fcport->port_name); 1748 fcport->last_rscn_gen = fcport->rscn_gen; 1749 fcport->last_login_gen = fcport->login_gen; 1750 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1751 break; 1752 } 1753 qla_chk_n2n_b4_login(vha, fcport); 1754 break; 1755 } 1756 break; 1757 1758 case DSC_LOGIN_FAILED: 1759 if (N2N_TOPO(vha->hw)) 1760 qla_chk_n2n_b4_login(vha, fcport); 1761 else 1762 qlt_schedule_sess_for_deletion(fcport); 1763 break; 1764 1765 case DSC_LOGIN_COMPLETE: 1766 /* recheck login state */ 1767 data[0] = data[1] = 0; 1768 qla2x00_post_async_adisc_work(vha, fcport, data); 1769 break; 1770 1771 case DSC_LOGIN_PEND: 1772 if (vha->hw->flags.edif_enabled) 1773 break; 1774 1775 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { 1776 ql_dbg(ql_dbg_disc, vha, 0x2118, 1777 "%s %d %8phC post %s PRLI\n", 1778 __func__, __LINE__, fcport->port_name, 1779 NVME_TARGET(vha->hw, 
fcport) ? "NVME" : "FC"); 1780 qla24xx_post_prli_work(vha, fcport); 1781 } 1782 break; 1783 1784 case DSC_UPD_FCPORT: 1785 sec = jiffies_to_msecs(jiffies - 1786 fcport->jiffies_at_registration)/1000; 1787 if (fcport->sec_since_registration < sec && sec && 1788 !(sec % 60)) { 1789 fcport->sec_since_registration = sec; 1790 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, 1791 "%s %8phC - Slow Rport registration(%d Sec)\n", 1792 __func__, fcport->port_name, sec); 1793 } 1794 1795 if (fcport->next_disc_state != DSC_DELETE_PEND) 1796 fcport->next_disc_state = DSC_ADISC; 1797 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1798 break; 1799 1800 default: 1801 break; 1802 } 1803 1804 return 0; 1805 } 1806 1807 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, 1808 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type) 1809 { 1810 struct qla_work_evt *e; 1811 1812 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); 1813 if (!e) 1814 return QLA_FUNCTION_FAILED; 1815 1816 e->u.new_sess.id = *id; 1817 e->u.new_sess.pla = pla; 1818 e->u.new_sess.fc4_type = fc4_type; 1819 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); 1820 if (node_name) 1821 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE); 1822 1823 return qla2x00_post_work(vha, e); 1824 } 1825 1826 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) 1827 { 1828 fc_port_t *fcport; 1829 unsigned long flags; 1830 1831 switch (ea->id.b.rsvd_1) { 1832 case RSCN_PORT_ADDR: 1833 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); 1834 if (fcport) { 1835 if (fcport->flags & FCF_FCP2_DEVICE && 1836 atomic_read(&fcport->state) == FCS_ONLINE) { 1837 ql_dbg(ql_dbg_disc, vha, 0x2115, 1838 "Delaying session delete for FCP2 portid=%06x %8phC ", 1839 fcport->d_id.b24, fcport->port_name); 1840 return; 1841 } 1842 1843 if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { 1844 /* 1845 * On ipsec start by remote port, Target port 1846 * may use RSCN to trigger initiator to 1847 * relogin. 
If driver is already in the 1848 * process of a relogin, then ignore the RSCN 1849 * and allow the current relogin to continue. 1850 * This reduces thrashing of the connection. 1851 */ 1852 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1853 /* 1854 * If state = online, then set scan_needed=1 to do relogin. 1855 * Otherwise we're already in the middle of a relogin 1856 */ 1857 fcport->scan_needed = 1; 1858 fcport->rscn_gen++; 1859 } 1860 } else { 1861 fcport->scan_needed = 1; 1862 fcport->rscn_gen++; 1863 } 1864 } 1865 break; 1866 case RSCN_AREA_ADDR: 1867 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1868 if (fcport->flags & FCF_FCP2_DEVICE && 1869 atomic_read(&fcport->state) == FCS_ONLINE) 1870 continue; 1871 1872 if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { 1873 fcport->scan_needed = 1; 1874 fcport->rscn_gen++; 1875 } 1876 } 1877 break; 1878 case RSCN_DOM_ADDR: 1879 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1880 if (fcport->flags & FCF_FCP2_DEVICE && 1881 atomic_read(&fcport->state) == FCS_ONLINE) 1882 continue; 1883 1884 if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { 1885 fcport->scan_needed = 1; 1886 fcport->rscn_gen++; 1887 } 1888 } 1889 break; 1890 case RSCN_FAB_ADDR: 1891 default: 1892 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1893 if (fcport->flags & FCF_FCP2_DEVICE && 1894 atomic_read(&fcport->state) == FCS_ONLINE) 1895 continue; 1896 1897 fcport->scan_needed = 1; 1898 fcport->rscn_gen++; 1899 } 1900 break; 1901 } 1902 1903 spin_lock_irqsave(&vha->work_lock, flags); 1904 if (vha->scan.scan_flags == 0) { 1905 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); 1906 vha->scan.scan_flags |= SF_QUEUED; 1907 schedule_delayed_work(&vha->scan.scan_work, 5); 1908 } 1909 spin_unlock_irqrestore(&vha->work_lock, flags); 1910 } 1911 1912 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, 1913 struct event_arg *ea) 1914 { 1915 fc_port_t *fcport = ea->fcport; 1916 1917 if 
(test_bit(UNLOADING, &vha->dpc_flags)) 1918 return; 1919 1920 ql_dbg(ql_dbg_disc, vha, 0x2102, 1921 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", 1922 __func__, fcport->port_name, fcport->disc_state, 1923 fcport->fw_login_state, fcport->login_pause, 1924 fcport->deleted, fcport->conflict, 1925 fcport->last_rscn_gen, fcport->rscn_gen, 1926 fcport->last_login_gen, fcport->login_gen, 1927 fcport->flags); 1928 1929 if (fcport->last_rscn_gen != fcport->rscn_gen) { 1930 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n", 1931 __func__, __LINE__, fcport->port_name); 1932 qla24xx_post_gnl_work(vha, fcport); 1933 return; 1934 } 1935 1936 qla24xx_fcport_handle_login(vha, fcport); 1937 } 1938 1939 void qla_handle_els_plogi_done(scsi_qla_host_t *vha, 1940 struct event_arg *ea) 1941 { 1942 if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) && 1943 vha->hw->flags.edif_enabled) { 1944 /* check to see if App support Secure */ 1945 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 1946 return; 1947 } 1948 1949 /* for pure Target Mode, PRLI will not be initiated */ 1950 if (vha->host->active_mode == MODE_TARGET) 1951 return; 1952 1953 ql_dbg(ql_dbg_disc, vha, 0x2118, 1954 "%s %d %8phC post PRLI\n", 1955 __func__, __LINE__, ea->fcport->port_name); 1956 qla24xx_post_prli_work(vha, ea->fcport); 1957 } 1958 1959 /* 1960 * RSCN(s) came in for this fcport, but the RSCN(s) was not able 1961 * to be consumed by the fcport 1962 */ 1963 void qla_rscn_replay(fc_port_t *fcport) 1964 { 1965 struct event_arg ea; 1966 1967 switch (fcport->disc_state) { 1968 case DSC_DELETE_PEND: 1969 return; 1970 default: 1971 break; 1972 } 1973 1974 if (fcport->scan_needed) { 1975 memset(&ea, 0, sizeof(ea)); 1976 ea.id = fcport->d_id; 1977 ea.id.b.rsvd_1 = RSCN_PORT_ADDR; 1978 qla2x00_handle_rscn(fcport->vha, &ea); 1979 } 1980 } 1981 1982 static void 1983 qla2x00_tmf_iocb_timeout(void *data) 1984 { 1985 srb_t *sp = data; 1986 struct srb_iocb *tmf = &sp->u.iocb_cmd; 1987 int rc, 
h; 1988 unsigned long flags; 1989 1990 rc = qla24xx_async_abort_cmd(sp, false); 1991 if (rc) { 1992 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 1993 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 1994 if (sp->qpair->req->outstanding_cmds[h] == sp) { 1995 sp->qpair->req->outstanding_cmds[h] = NULL; 1996 break; 1997 } 1998 } 1999 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2000 tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT); 2001 tmf->u.tmf.data = QLA_FUNCTION_FAILED; 2002 complete(&tmf->u.tmf.comp); 2003 } 2004 } 2005 2006 static void qla2x00_tmf_sp_done(srb_t *sp, int res) 2007 { 2008 struct srb_iocb *tmf = &sp->u.iocb_cmd; 2009 2010 complete(&tmf->u.tmf.comp); 2011 } 2012 2013 int 2014 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, 2015 uint32_t tag) 2016 { 2017 struct scsi_qla_host *vha = fcport->vha; 2018 struct srb_iocb *tm_iocb; 2019 srb_t *sp; 2020 int rval = QLA_FUNCTION_FAILED; 2021 2022 /* ref: INIT */ 2023 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2024 if (!sp) 2025 goto done; 2026 2027 sp->type = SRB_TM_CMD; 2028 sp->name = "tmf"; 2029 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), 2030 qla2x00_tmf_sp_done); 2031 sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; 2032 2033 tm_iocb = &sp->u.iocb_cmd; 2034 init_completion(&tm_iocb->u.tmf.comp); 2035 tm_iocb->u.tmf.flags = flags; 2036 tm_iocb->u.tmf.lun = lun; 2037 2038 ql_dbg(ql_dbg_taskm, vha, 0x802f, 2039 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 2040 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2041 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2042 2043 rval = qla2x00_start_sp(sp); 2044 if (rval != QLA_SUCCESS) 2045 goto done_free_sp; 2046 wait_for_completion(&tm_iocb->u.tmf.comp); 2047 2048 rval = tm_iocb->u.tmf.data; 2049 2050 if (rval != QLA_SUCCESS) { 2051 ql_log(ql_log_warn, vha, 0x8030, 2052 "TM IOCB failed (%x).\n", rval); 2053 } 2054 2055 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { 
2056 flags = tm_iocb->u.tmf.flags; 2057 lun = (uint16_t)tm_iocb->u.tmf.lun; 2058 2059 /* Issue Marker IOCB */ 2060 qla2x00_marker(vha, vha->hw->base_qpair, 2061 fcport->loop_id, lun, 2062 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 2063 } 2064 2065 done_free_sp: 2066 /* ref: INIT */ 2067 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2068 fcport->flags &= ~FCF_ASYNC_SENT; 2069 done: 2070 return rval; 2071 } 2072 2073 int 2074 qla24xx_async_abort_command(srb_t *sp) 2075 { 2076 unsigned long flags = 0; 2077 2078 uint32_t handle; 2079 fc_port_t *fcport = sp->fcport; 2080 struct qla_qpair *qpair = sp->qpair; 2081 struct scsi_qla_host *vha = fcport->vha; 2082 struct req_que *req = qpair->req; 2083 2084 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2085 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 2086 if (req->outstanding_cmds[handle] == sp) 2087 break; 2088 } 2089 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2090 2091 if (handle == req->num_outstanding_cmds) { 2092 /* Command not found. 
*/ 2093 return QLA_ERR_NOT_FOUND; 2094 } 2095 if (sp->type == SRB_FXIOCB_DCMD) 2096 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, 2097 FXDISC_ABORT_IOCTL); 2098 2099 return qla24xx_async_abort_cmd(sp, true); 2100 } 2101 2102 static void 2103 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 2104 { 2105 struct srb *sp; 2106 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", 2107 ea->data[0]); 2108 2109 switch (ea->data[0]) { 2110 case MBS_COMMAND_COMPLETE: 2111 ql_dbg(ql_dbg_disc, vha, 0x2118, 2112 "%s %d %8phC post gpdb\n", 2113 __func__, __LINE__, ea->fcport->port_name); 2114 2115 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 2116 ea->fcport->logout_on_delete = 1; 2117 ea->fcport->nvme_prli_service_param = ea->iop[0]; 2118 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST) 2119 ea->fcport->nvme_first_burst_size = 2120 (ea->iop[1] & 0xffff) * 512; 2121 else 2122 ea->fcport->nvme_first_burst_size = 0; 2123 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 2124 break; 2125 default: 2126 sp = ea->sp; 2127 ql_dbg(ql_dbg_disc, vha, 0x2118, 2128 "%s %d %8phC priority %s, fc4type %x prev try %s\n", 2129 __func__, __LINE__, ea->fcport->port_name, 2130 vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? 2131 "FCP" : "NVMe", ea->fcport->fc4_type, 2132 (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ? 
2133 "NVME" : "FCP"); 2134 2135 if (NVME_FCP_TARGET(ea->fcport)) { 2136 if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) 2137 ea->fcport->do_prli_nvme = 0; 2138 else 2139 ea->fcport->do_prli_nvme = 1; 2140 } else { 2141 ea->fcport->do_prli_nvme = 0; 2142 } 2143 2144 if (N2N_TOPO(vha->hw)) { 2145 if (ea->fcport->n2n_link_reset_cnt == 2146 vha->hw->login_retry_count && 2147 ea->fcport->flags & FCF_FCSP_DEVICE) { 2148 /* remote authentication app just started */ 2149 ea->fcport->n2n_link_reset_cnt = 0; 2150 } 2151 2152 if (ea->fcport->n2n_link_reset_cnt < 2153 vha->hw->login_retry_count) { 2154 ea->fcport->n2n_link_reset_cnt++; 2155 vha->relogin_jif = jiffies + 2 * HZ; 2156 /* 2157 * PRLI failed. Reset link to kick start 2158 * state machine 2159 */ 2160 set_bit(N2N_LINK_RESET, &vha->dpc_flags); 2161 qla2xxx_wake_dpc(vha); 2162 } else { 2163 ql_log(ql_log_warn, vha, 0x2119, 2164 "%s %d %8phC Unable to reconnect\n", 2165 __func__, __LINE__, 2166 ea->fcport->port_name); 2167 } 2168 } else { 2169 /* 2170 * switch connect. login failed. 
Take connection down 2171 * and allow relogin to retrigger 2172 */ 2173 ea->fcport->flags &= ~FCF_ASYNC_SENT; 2174 ea->fcport->keep_nport_handle = 0; 2175 ea->fcport->logout_on_delete = 1; 2176 qlt_schedule_sess_for_deletion(ea->fcport); 2177 } 2178 break; 2179 } 2180 } 2181 2182 void 2183 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 2184 { 2185 port_id_t cid; /* conflict Nport id */ 2186 u16 lid; 2187 struct fc_port *conflict_fcport; 2188 unsigned long flags; 2189 struct fc_port *fcport = ea->fcport; 2190 2191 ql_dbg(ql_dbg_disc, vha, 0xffff, 2192 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n", 2193 __func__, fcport->port_name, fcport->disc_state, 2194 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, 2195 ea->sp->gen1, fcport->rscn_gen, 2196 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); 2197 2198 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 2199 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) { 2200 ql_dbg(ql_dbg_disc, vha, 0x20ea, 2201 "%s %d %8phC Remote is trying to login\n", 2202 __func__, __LINE__, fcport->port_name); 2203 return; 2204 } 2205 2206 if ((fcport->disc_state == DSC_DELETE_PEND) || 2207 (fcport->disc_state == DSC_DELETED)) { 2208 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2209 return; 2210 } 2211 2212 if (ea->sp->gen2 != fcport->login_gen) { 2213 /* target side must have changed it. 
*/ 2214 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2215 "%s %8phC generation changed\n", 2216 __func__, fcport->port_name); 2217 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2218 return; 2219 } else if (ea->sp->gen1 != fcport->rscn_gen) { 2220 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2221 "%s %8phC RSCN generation changed\n", 2222 __func__, fcport->port_name); 2223 qla_rscn_replay(fcport); 2224 qlt_schedule_sess_for_deletion(fcport); 2225 return; 2226 } 2227 2228 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", 2229 ea->data[0]); 2230 2231 switch (ea->data[0]) { 2232 case MBS_COMMAND_COMPLETE: 2233 /* 2234 * Driver must validate login state - If PRLI not complete, 2235 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI 2236 * requests. 2237 */ 2238 if (vha->hw->flags.edif_enabled) { 2239 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); 2240 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 2241 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 2242 ea->fcport->logout_on_delete = 1; 2243 ea->fcport->send_els_logo = 0; 2244 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 2245 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 2246 2247 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 2248 } else { 2249 if (NVME_TARGET(vha->hw, fcport)) { 2250 ql_dbg(ql_dbg_disc, vha, 0x2117, 2251 "%s %d %8phC post prli\n", 2252 __func__, __LINE__, fcport->port_name); 2253 qla24xx_post_prli_work(vha, fcport); 2254 } else { 2255 ql_dbg(ql_dbg_disc, vha, 0x20ea, 2256 "%s %d %8phC LoopID 0x%x in use with %06x. 
post gpdb\n", 2257 __func__, __LINE__, fcport->port_name, 2258 fcport->loop_id, fcport->d_id.b24); 2259 2260 set_bit(fcport->loop_id, vha->hw->loop_id_map); 2261 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 2262 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 2263 fcport->logout_on_delete = 1; 2264 fcport->send_els_logo = 0; 2265 fcport->fw_login_state = DSC_LS_PRLI_COMP; 2266 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 2267 2268 qla24xx_post_gpdb_work(vha, fcport, 0); 2269 } 2270 } 2271 break; 2272 case MBS_COMMAND_ERROR: 2273 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", 2274 __func__, __LINE__, ea->fcport->port_name, ea->data[1]); 2275 2276 qlt_schedule_sess_for_deletion(ea->fcport); 2277 break; 2278 case MBS_LOOP_ID_USED: 2279 /* data[1] = IO PARAM 1 = nport ID */ 2280 cid.b.domain = (ea->iop[1] >> 16) & 0xff; 2281 cid.b.area = (ea->iop[1] >> 8) & 0xff; 2282 cid.b.al_pa = ea->iop[1] & 0xff; 2283 cid.b.rsvd_1 = 0; 2284 2285 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2286 "%s %d %8phC lid %#x in use with pid %06x post gnl\n", 2287 __func__, __LINE__, ea->fcport->port_name, 2288 ea->fcport->loop_id, cid.b24); 2289 2290 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); 2291 ea->fcport->loop_id = FC_NO_LOOP_ID; 2292 qla24xx_post_gnl_work(vha, ea->fcport); 2293 break; 2294 case MBS_PORT_ID_USED: 2295 lid = ea->iop[1] & 0xffff; 2296 qlt_find_sess_invalidate_other(vha, 2297 wwn_to_u64(ea->fcport->port_name), 2298 ea->fcport->d_id, lid, &conflict_fcport); 2299 2300 if (conflict_fcport) { 2301 /* 2302 * Another fcport share the same loop_id/nport id. 2303 * Conflict fcport needs to finish cleanup before this 2304 * fcport can proceed to login. 2305 */ 2306 conflict_fcport->conflict = ea->fcport; 2307 ea->fcport->login_pause = 1; 2308 2309 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2310 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. 
post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* No conflicting session: take over the loop id and
			 * recycle this session so login restarts cleanly. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/**
 * qla83xx_nic_core_fw_load() - Load the NIC Core firmware (ISP83xx).
 * @vha: HA context
 *
 * Runs entirely under the IDC lock.  The init/reset owner publishes the
 * IDC major version and device-ready state; all other functions verify
 * major-version compatibility and publish only their minor version.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
	}


	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	/* Let priority default to FCP, can be overridden by nvram_config */
	ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ha->isp_ops->nvram_config(vha);

	/* Guard against an invalid priority coming back from NVRAM. */
	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
		ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
	    ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* If smartsan enabled then require fdmi and rdp enabled */
	if (ql2xsmartsan) {
		ql2xfdmienable = 1;
		ql2xrdpenable = 1;
	}

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* NOTE(review): in pure target mode init_rings is skipped and rval
	 * keeps QLA_SUCCESS from the checks above. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* A true 2300: clear MWI per the erratum described above. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x0);
		rd_reg_word(&reg->ctrl_status);

		/* Release RISC module. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and make sure INTx is not masked. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 on success (a usable RISC image is already loaded);
 * QLA_FUNCTION_FAILED means the caller must load RISC code.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for firmware to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
 * @vha: HA context
 *
 * Returns 0 on success (no-op on non-81xx adapters).
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}

/*
 * Post-RISC-reset health check for ISP27xx/28xx: a mailbox0 value of 0xf
 * right after reset indicates the reset failed; dump mb[0-31] and return
 * QLA_FUNCTION_FAILED in that case, QLA_SUCCESS otherwise.
 */
static int
qla_chk_risc_recovery(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le16 __iomem *mbptr = &reg->mailbox0;
	int i;
	u16 mb[32];
	int rc = QLA_SUCCESS;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return rc;

	/* this check is only valid after RISC reset */
	mb[0] = rd_reg_word(mbptr);
	mbptr++;
	if (mb[0] == 0xf) {
		rc = QLA_FUNCTION_FAILED;

		for (i = 1; i < 32; i++) {
			mb[i] = rd_reg_word(mbptr);
			mbptr++;
		}

		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
		    mb[15]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
		    mb[23]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
		    mb[31]);
	}
	return rc;
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;
	int print = 1;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. 
 */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have
				 * to set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Pulse the RISC reset: set, release pause, then clear. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	mdelay(10);
	rd_reg_dword(&reg->hccr);

	/* Wait (up to ~300 ms) for mailbox0 to clear after the reset. */
	wd = rd_reg_word(&reg->mailbox0);
	for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt) {
			mdelay(1);
			if (print && qla_chk_risc_recovery(vha))
				print = 0;

			wd = rd_reg_word(&reg->mailbox0);
		} else {
			rval = QLA_FUNCTION_TIMEOUT;

			ql_log(ql_log_warn, vha, 0x015e,
			    "RISC reset timeout\n");
		}
	}

	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}

/* Read the RISC semaphore register via the ISP25xx indirect register
 * window.
 */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}

/* Write the RISC semaphore register via the ISP25xx indirect register
 * window.
 */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}

/* Acquire the RISC semaphore before reset, forcing it if it cannot be
 * obtained in time; only applies to subsystem IDs 0x0175 and 0x0240.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return rval;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	rval = qla24xx_reset_risc(vha);

	return rval;
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

/* Allocate and enable the Fibre Channel Event (FCE) trace buffer on
 * adapters that support it; no-op if unsupported or already allocated.
 */
static void
qla2x00_init_fce_trace(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	if (ha->fce) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: FCE Mem is already allocated.\n",
		    __func__);
		return;
	}

	/* Allocate memory for Fibre Channel Event Buffer. 
*/ 3433 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3434 GFP_KERNEL); 3435 if (!tc) { 3436 ql_log(ql_log_warn, vha, 0x00be, 3437 "Unable to allocate (%d KB) for FCE.\n", 3438 FCE_SIZE / 1024); 3439 return; 3440 } 3441 3442 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 3443 ha->fce_mb, &ha->fce_bufs); 3444 if (rval) { 3445 ql_log(ql_log_warn, vha, 0x00bf, 3446 "Unable to initialize FCE (%d).\n", rval); 3447 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); 3448 return; 3449 } 3450 3451 ql_dbg(ql_dbg_init, vha, 0x00c0, 3452 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); 3453 3454 ha->flags.fce_enabled = 1; 3455 ha->fce_dma = tc_dma; 3456 ha->fce = tc; 3457 } 3458 3459 static void 3460 qla2x00_init_eft_trace(scsi_qla_host_t *vha) 3461 { 3462 int rval; 3463 dma_addr_t tc_dma; 3464 void *tc; 3465 struct qla_hw_data *ha = vha->hw; 3466 3467 if (!IS_FWI2_CAPABLE(ha)) 3468 return; 3469 3470 if (ha->eft) { 3471 ql_dbg(ql_dbg_init, vha, 0x00bd, 3472 "%s: EFT Mem is already allocated.\n", 3473 __func__); 3474 return; 3475 } 3476 3477 /* Allocate memory for Extended Trace Buffer. 
*/ 3478 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3479 GFP_KERNEL); 3480 if (!tc) { 3481 ql_log(ql_log_warn, vha, 0x00c1, 3482 "Unable to allocate (%d KB) for EFT.\n", 3483 EFT_SIZE / 1024); 3484 return; 3485 } 3486 3487 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 3488 if (rval) { 3489 ql_log(ql_log_warn, vha, 0x00c2, 3490 "Unable to initialize EFT (%d).\n", rval); 3491 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); 3492 return; 3493 } 3494 3495 ql_dbg(ql_dbg_init, vha, 0x00c3, 3496 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); 3497 3498 ha->eft_dma = tc_dma; 3499 ha->eft = tc; 3500 } 3501 3502 static void 3503 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) 3504 { 3505 qla2x00_init_fce_trace(vha); 3506 qla2x00_init_eft_trace(vha); 3507 } 3508 3509 void 3510 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) 3511 { 3512 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 3513 eft_size, fce_size, mq_size; 3514 struct qla_hw_data *ha = vha->hw; 3515 struct req_que *req = ha->req_q_map[0]; 3516 struct rsp_que *rsp = ha->rsp_q_map[0]; 3517 struct qla2xxx_fw_dump *fw_dump; 3518 3519 if (ha->fw_dump) { 3520 ql_dbg(ql_dbg_init, vha, 0x00bd, 3521 "Firmware dump already allocated.\n"); 3522 return; 3523 } 3524 3525 ha->fw_dumped = 0; 3526 ha->fw_dump_cap_flags = 0; 3527 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 3528 req_q_size = rsp_q_size = 0; 3529 3530 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 3531 fixed_size = sizeof(struct qla2100_fw_dump); 3532 } else if (IS_QLA23XX(ha)) { 3533 fixed_size = offsetof(struct qla2300_fw_dump, data_ram); 3534 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 3535 sizeof(uint16_t); 3536 } else if (IS_FWI2_CAPABLE(ha)) { 3537 if (IS_QLA83XX(ha)) 3538 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 3539 else if (IS_QLA81XX(ha)) 3540 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 3541 else if (IS_QLA25XX(ha)) 3542 fixed_size = offsetof(struct 
qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/*
		 * Ensure the trace buffers exist before sizing their dump
		 * chains; each is only counted if allocation succeeded.
		 */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* Template-driven sizing: sum both fwdt template sizes. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		/* Chained entries (MQ/FCE/offload) start at this offset. */
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;
		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exlogin_size;
	}

	/* Only (re)allocate when there is no buffer or it is too small. */
	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {

		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			/* optrom_mutex serializes against dump readers. */
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already-captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len = dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/*
					 * Template-based dumps carry the MPI
					 * dump inside the same allocation; no
					 * legacy header to fill in.
					 */
					ha->mpi_fw_dump = (char *)fw_dump +
					    ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Legacy dump header ("QLGC", big-endian). */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
					htonl(offsetof
					    (struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}

/*
 * qla81xx_mpi_sync() - Synchronize the 81xx MPS setting in RISC RAM with
 * the value read from PCI config space, under a firmware semaphore
 * (RAM word 0x7c00).  No-op on non-81xx parts.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the semaphore before touching the shared word. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the PCI-derived MPS bits into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Always release the semaphore; this overwrites rval. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}

/*
 * qla2x00_alloc_outstanding_cmds() - Allocate the per-request-queue
 * outstanding command array, sized from firmware resource counts (or a
 * legacy default), falling back to a minimal array on allocation failure.
 */
int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
{
	/* Don't try to reallocate the array */
	if (req->outstanding_cmds)
		return QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(ha))
		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
	else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 3731 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 3732 else 3733 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 3734 } 3735 3736 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3737 sizeof(srb_t *), 3738 GFP_KERNEL); 3739 3740 if (!req->outstanding_cmds) { 3741 /* 3742 * Try to allocate a minimal size just so we can get through 3743 * initialization. 3744 */ 3745 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 3746 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3747 sizeof(srb_t *), 3748 GFP_KERNEL); 3749 3750 if (!req->outstanding_cmds) { 3751 ql_log(ql_log_fatal, NULL, 0x0126, 3752 "Failed to allocate memory for " 3753 "outstanding_cmds for req_que %p.\n", req); 3754 req->num_outstanding_cmds = 0; 3755 return QLA_FUNCTION_FAILED; 3756 } 3757 } 3758 3759 return QLA_SUCCESS; 3760 } 3761 3762 #define PRINT_FIELD(_field, _flag, _str) { \ 3763 if (a0->_field & _flag) {\ 3764 if (p) {\ 3765 strcat(ptr, "|");\ 3766 ptr++;\ 3767 leftover--;\ 3768 } \ 3769 len = snprintf(ptr, leftover, "%s", _str); \ 3770 p = 1;\ 3771 leftover -= len;\ 3772 ptr += len; \ 3773 } \ 3774 } 3775 3776 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 3777 { 3778 #define STR_LEN 64 3779 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 3780 u8 str[STR_LEN], *ptr, p; 3781 int leftover, len; 3782 3783 memset(str, 0, STR_LEN); 3784 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 3785 ql_dbg(ql_dbg_init, vha, 0x015a, 3786 "SFP MFG Name: %s\n", str); 3787 3788 memset(str, 0, STR_LEN); 3789 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 3790 ql_dbg(ql_dbg_init, vha, 0x015c, 3791 "SFP Part Name: %s\n", str); 3792 3793 /* media */ 3794 memset(str, 0, STR_LEN); 3795 ptr = str; 3796 leftover = STR_LEN; 3797 p = len = 0; 3798 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 3799 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 3800 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 
3801 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 3802 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 3803 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 3804 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 3805 ql_dbg(ql_dbg_init, vha, 0x0160, 3806 "SFP Media: %s\n", str); 3807 3808 /* link length */ 3809 memset(str, 0, STR_LEN); 3810 ptr = str; 3811 leftover = STR_LEN; 3812 p = len = 0; 3813 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 3814 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 3815 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 3816 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 3817 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 3818 ql_dbg(ql_dbg_init, vha, 0x0196, 3819 "SFP Link Length: %s\n", str); 3820 3821 memset(str, 0, STR_LEN); 3822 ptr = str; 3823 leftover = STR_LEN; 3824 p = len = 0; 3825 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 3826 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 3827 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 3828 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 3829 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 3830 ql_dbg(ql_dbg_init, vha, 0x016e, 3831 "SFP FC Link Tech: %s\n", str); 3832 3833 if (a0->length_km) 3834 ql_dbg(ql_dbg_init, vha, 0x016f, 3835 "SFP Distant: %d km\n", a0->length_km); 3836 if (a0->length_100m) 3837 ql_dbg(ql_dbg_init, vha, 0x0170, 3838 "SFP Distant: %d m\n", a0->length_100m*100); 3839 if (a0->length_50um_10m) 3840 ql_dbg(ql_dbg_init, vha, 0x0189, 3841 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 3842 if (a0->length_62um_10m) 3843 ql_dbg(ql_dbg_init, vha, 0x018a, 3844 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); 3845 if (a0->length_om4_10m) 3846 ql_dbg(ql_dbg_init, vha, 0x0194, 3847 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); 3848 if (a0->length_om3_10m) 3849 ql_dbg(ql_dbg_init, vha, 0x0195, 3850 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); 3851 } 3852 3853 3854 /** 3855 * qla24xx_detect_sfp() 
 *
 * @vha: adapter state pointer.
 *
 * @return
 *	0 -- Configure firmware to use short-range settings -- normal
 *	     buffer-to-buffer credits.
 *
 *	1 -- Configure firmware to use long-range settings -- extra
 *	     buffer-to-buffer credits should be allocated with
 *	     ha->lr_distance containing distance settings from NVRAM or SFP
 *	     (if supported).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN	2
	static const char * const types[] = { "Short", "Long" };
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		    & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	/* SFP data is available -- it overrides the NVRAM seed. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	      lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}

/*
 * qla_init_iocb_limit() - Distribute the firmware IOCB budget across the
 * base queue pair and all additional queue pairs.  The global limit is
 * QLA_IOCB_PCT_LIMIT percent of the original firmware IOCB count; each
 * queue pair additionally gets an equal per-QP share of that limit.
 */
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	u16 i, num_qps;
	u32 limit;
	struct qla_hw_data *ha = vha->hw;

	/* +1 accounts for the base queue pair. */
	num_qps = ha->num_qpairs + 1;
	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
	ha->base_qpair->fwres.iocbs_limit = limit;
	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
	ha->base_qpair->fwres.iocbs_used = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->fwres.iocbs_total =
				ha->orig_fw_iocb_count;
			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
				limit / num_qps;
			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
		}
	}
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	/* Guards the one-shot firmware restart for long-range SFPs. */
	int done_once = 0;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.
 */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);	/* flush the write (PCI posting) */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/*
				 * Enable BPM support?  A long-range SFP
				 * requires restarting the firmware once so
				 * extra B2B credits take effect.
				 */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/*
					 * Clamp vport count to a multiple of
					 * MIN_MULTI_ID_FABRIC minus one.
					 */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f ||
		    ha->flags.edif_enabled)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Sector size is reported in 4-byte words. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha)) {
				/* Missing FAC is not fatal on 83xx. */
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Mark every slot processed so stale entries are never consumed. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* rx_sens 0 is invalid on these parts; use 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * qla24xx_update_fw_options() - 24xx+ counterpart: adjust firmware
 * option bits (ABTS hold, ATIO routing, exchange tracking, PUREX, N2N
 * security) and push serdes parameters when NVRAM enables them.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~(BIT_4);

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;

		/*
		 * N2N: set Secure=1 for PLOGI ACC and
		 * fw shal not send PRLI after PLOGI Acc
		 */
		if (ha->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			ha->fw_options[3] |= BIT_15;
			ha->flags.n2n_fw_acc_sec = 1;
		} else {
			ha->fw_options[3] &= ~BIT_15;
			ha->flags.n2n_fw_acc_sec = 0;
		}
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only bother the firmware when something was actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

/*
 * qla2x00_config_rings() - Program ring parameters for legacy (2x00)
 * adapters: fill the init control block and zero the queue in/out
 * pointer registers.  NOTE(review): callers appear to hold
 * hardware_lock around isp_ops->config_rings -- confirm at call sites.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

/*
 * qla24xx_config_rings() - 24xx+ counterpart: program the 24xx init
 * control block (including the ATIO queue for target mode), MSI-X
 * routing and shadow-register options, then zero the queue pointers.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block.
 */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			/* Vector 0 is reserved; base queue uses entry 1. */
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; start handles at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware.
*/ 4475 req->ring_ptr = req->ring; 4476 req->ring_index = 0; 4477 req->cnt = req->length; 4478 } 4479 4480 for (que = 0; que < ha->max_rsp_queues; que++) { 4481 rsp = ha->rsp_q_map[que]; 4482 if (!rsp || !test_bit(que, ha->rsp_qid_map)) 4483 continue; 4484 rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); 4485 *rsp->in_ptr = 0; 4486 /* Initialize response queue entries */ 4487 if (IS_QLAFX00(ha)) 4488 qlafx00_init_response_q_entries(rsp); 4489 else 4490 qla2x00_init_response_q_entries(rsp); 4491 } 4492 4493 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 4494 ha->tgt.atio_ring_index = 0; 4495 /* Initialize ATIO queue entries */ 4496 qlt_init_atio_q_entries(vha); 4497 4498 ha->isp_ops->config_rings(vha); 4499 4500 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4501 4502 if (IS_QLAFX00(ha)) { 4503 rval = qlafx00_init_firmware(vha, ha->init_cb_size); 4504 goto next_check; 4505 } 4506 4507 /* Update any ISP specific firmware options before initialization. */ 4508 ha->isp_ops->update_fw_options(vha); 4509 4510 ql_dbg(ql_dbg_init, vha, 0x00d1, 4511 "Issue init firmware FW opt 1-3= %08x %08x %08x.\n", 4512 le32_to_cpu(mid_init_cb->init_cb.firmware_options_1), 4513 le32_to_cpu(mid_init_cb->init_cb.firmware_options_2), 4514 le32_to_cpu(mid_init_cb->init_cb.firmware_options_3)); 4515 4516 if (ha->flags.npiv_supported) { 4517 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) 4518 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; 4519 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 4520 } 4521 4522 if (IS_FWI2_CAPABLE(ha)) { 4523 mid_init_cb->options = cpu_to_le16(BIT_1); 4524 mid_init_cb->init_cb.execution_throttle = 4525 cpu_to_le16(ha->cur_fw_xcb_count); 4526 ha->flags.dport_enabled = 4527 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & 4528 BIT_7) != 0; 4529 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", 4530 (ha->flags.dport_enabled) ? 
"enabled" : "disabled"); 4531 /* FA-WWPN Status */ 4532 ha->flags.fawwpn_enabled = 4533 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & 4534 BIT_6) != 0; 4535 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", 4536 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); 4537 } 4538 4539 /* ELS pass through payload is limit by frame size. */ 4540 if (ha->flags.edif_enabled) 4541 mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD); 4542 4543 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 4544 next_check: 4545 if (rval) { 4546 ql_log(ql_log_fatal, vha, 0x00d2, 4547 "Init Firmware **** FAILED ****.\n"); 4548 } else { 4549 ql_dbg(ql_dbg_init, vha, 0x00d3, 4550 "Init Firmware -- success.\n"); 4551 QLA_FW_STARTED(ha); 4552 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0; 4553 } 4554 4555 return (rval); 4556 } 4557 4558 /** 4559 * qla2x00_fw_ready() - Waits for firmware ready. 4560 * @vha: HA context 4561 * 4562 * Returns 0 on success. 4563 */ 4564 static int 4565 qla2x00_fw_ready(scsi_qla_host_t *vha) 4566 { 4567 int rval; 4568 unsigned long wtime, mtime, cs84xx_time; 4569 uint16_t min_wait; /* Minimum wait time if loop is down */ 4570 uint16_t wait_time; /* Wait time if loop is coming ready */ 4571 uint16_t state[6]; 4572 struct qla_hw_data *ha = vha->hw; 4573 4574 if (IS_QLAFX00(vha->hw)) 4575 return qlafx00_fw_ready(vha); 4576 4577 /* Time to wait for loop down */ 4578 if (IS_P3P_TYPE(ha)) 4579 min_wait = 30; 4580 else 4581 min_wait = 20; 4582 4583 /* 4584 * Firmware should take at most one RATOV to login, plus 5 seconds for 4585 * our own processing. 
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	/* Poll firmware state every 500ms until ready or a deadline hits. */
	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh timing parameters now that FW is up. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/*
			 * Only the base port attempts an explicit link init;
			 * otherwise schedule a full ISP abort/retry.
			 */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo == 4: firmware has not resolved the topology yet; retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->switch_cap = 0;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/*
	 * In N2N (topo 2) the port ID may be assigned elsewhere; skip the
	 * host-map update in the edif case, and in the n2n_bigger case.
	 */
	if (vha->hw->flags.edif_enabled) {
		if (topo != 2)
			qlt_update_host_map(vha, id);
	} else if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

/*
 * qla2x00_set_model_info() - derive model number/description strings.
 * Uses the NVRAM-provided model string when non-zero, otherwise falls back
 * to the qla_devtbl lookup (by PCI subsystem device id) or @def.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
    const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	/* Clamp to the 16 bytes we can compare against the zero pattern. */
	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		memcpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces and NULs. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strlcpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	/* On FWI2 parts, prefer the VPD product-id field for the description. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha                = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	/*
	 * NOTE(review): ctrl_status bits 15:14 == 01 selects the upper NVRAM
	 * half (base 0x80) -- presumably the second function/port; confirm
	 * against the ISP23xx register documentation.
	 */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	/* A valid image sums to zero over all bytes. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder WWPN (21:00:00:e0:8b:...); invalid on purpose. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval signals "defaults in use" to the caller. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
icb->response_accumulation_timer = 3; 5165 icb->interrupt_delay_timer = 5; 5166 5167 vha->flags.process_response_queue = 1; 5168 } else { 5169 /* Enable ZIO. */ 5170 if (!vha->flags.init_done) { 5171 ha->zio_mode = icb->add_firmware_options[0] & 5172 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 5173 ha->zio_timer = icb->interrupt_delay_timer ? 5174 icb->interrupt_delay_timer : 2; 5175 } 5176 icb->add_firmware_options[0] &= 5177 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 5178 vha->flags.process_response_queue = 0; 5179 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5180 ha->zio_mode = QLA_ZIO_MODE_6; 5181 5182 ql_log(ql_log_info, vha, 0x0068, 5183 "ZIO mode %d enabled; timer delay (%d us).\n", 5184 ha->zio_mode, ha->zio_timer * 100); 5185 5186 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 5187 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 5188 vha->flags.process_response_queue = 1; 5189 } 5190 } 5191 5192 if (rval) { 5193 ql_log(ql_log_warn, vha, 0x0069, 5194 "NVRAM configuration failed.\n"); 5195 } 5196 return (rval); 5197 } 5198 5199 static void 5200 qla2x00_rport_del(void *data) 5201 { 5202 fc_port_t *fcport = data; 5203 struct fc_rport *rport; 5204 unsigned long flags; 5205 5206 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 5207 rport = fcport->drport ? fcport->drport : fcport->rport; 5208 fcport->drport = NULL; 5209 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 5210 if (rport) { 5211 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, 5212 "%s %8phN. 
rport %p roles %x\n", 5213 __func__, fcport->port_name, rport, 5214 rport->roles); 5215 5216 fc_remote_port_delete(rport); 5217 } 5218 } 5219 5220 void qla2x00_set_fcport_state(fc_port_t *fcport, int state) 5221 { 5222 int old_state; 5223 5224 old_state = atomic_read(&fcport->state); 5225 atomic_set(&fcport->state, state); 5226 5227 /* Don't print state transitions during initial allocation of fcport */ 5228 if (old_state && old_state != state) { 5229 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, 5230 "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n", 5231 fcport->port_name, port_state_str[old_state], 5232 port_state_str[state], fcport->d_id.b.domain, 5233 fcport->d_id.b.area, fcport->d_id.b.al_pa); 5234 } 5235 } 5236 5237 /** 5238 * qla2x00_alloc_fcport() - Allocate a generic fcport. 5239 * @vha: HA context 5240 * @flags: allocation flags 5241 * 5242 * Returns a pointer to the allocated fcport, or NULL, if none available. 5243 */ 5244 fc_port_t * 5245 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 5246 { 5247 fc_port_t *fcport; 5248 5249 fcport = kzalloc(sizeof(fc_port_t), flags); 5250 if (!fcport) 5251 return NULL; 5252 5253 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 5254 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 5255 flags); 5256 if (!fcport->ct_desc.ct_sns) { 5257 ql_log(ql_log_warn, vha, 0xd049, 5258 "Failed to allocate ct_sns request.\n"); 5259 kfree(fcport); 5260 return NULL; 5261 } 5262 5263 /* Setup fcport template structure. 
*/ 5264 fcport->vha = vha; 5265 fcport->port_type = FCT_UNKNOWN; 5266 fcport->loop_id = FC_NO_LOOP_ID; 5267 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 5268 fcport->supported_classes = FC_COS_UNSPECIFIED; 5269 fcport->fp_speed = PORT_SPEED_UNKNOWN; 5270 5271 fcport->disc_state = DSC_DELETED; 5272 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 5273 fcport->deleted = QLA_SESS_DELETED; 5274 fcport->login_retry = vha->hw->login_retry_count; 5275 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5276 fcport->logout_on_delete = 1; 5277 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 5278 fcport->tgt_short_link_down_cnt = 0; 5279 fcport->dev_loss_tmo = 0; 5280 5281 if (!fcport->ct_desc.ct_sns) { 5282 ql_log(ql_log_warn, vha, 0xd049, 5283 "Failed to allocate ct_sns request.\n"); 5284 kfree(fcport); 5285 return NULL; 5286 } 5287 5288 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 5289 INIT_WORK(&fcport->free_work, qlt_free_session_done); 5290 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); 5291 INIT_LIST_HEAD(&fcport->gnl_entry); 5292 INIT_LIST_HEAD(&fcport->list); 5293 5294 INIT_LIST_HEAD(&fcport->sess_cmd_list); 5295 spin_lock_init(&fcport->sess_cmd_lock); 5296 5297 spin_lock_init(&fcport->edif.sa_list_lock); 5298 INIT_LIST_HEAD(&fcport->edif.tx_sa_list); 5299 INIT_LIST_HEAD(&fcport->edif.rx_sa_list); 5300 5301 spin_lock_init(&fcport->edif.indx_list_lock); 5302 INIT_LIST_HEAD(&fcport->edif.edif_indx_list); 5303 5304 return fcport; 5305 } 5306 5307 void 5308 qla2x00_free_fcport(fc_port_t *fcport) 5309 { 5310 if (fcport->ct_desc.ct_sns) { 5311 dma_free_coherent(&fcport->vha->hw->pdev->dev, 5312 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 5313 fcport->ct_desc.ct_sns_dma); 5314 5315 fcport->ct_desc.ct_sns = NULL; 5316 } 5317 5318 qla_edif_flush_sa_ctl_lists(fcport); 5319 list_del(&fcport->list); 5320 qla2x00_clear_loop_id(fcport); 5321 5322 qla_edif_list_del(fcport); 5323 5324 kfree(fcport); 5325 } 5326 5327 static void 
qla_get_login_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	u32 *bp, sz;
	__be32 *q;

	/* init_cb is reused as a bounce buffer for the PLOGI template read. */
	memset(ha->init_cb, 0, ha->init_cb_size);
	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	    ha->init_cb, sz);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		    "PLOGI ELS param read fail.\n");
		return;
	}
	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	/* Cache the template (byte-swapped to wire order) for later PLOGIs. */
	bp = (uint32_t *)ha->init_cb;
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
}

/*
 * qla2x00_configure_loop
 *      Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *      ha                = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      1 = error.
 *      2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Snapshot dpc flags; save_flags is used to re-arm them on resync. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Not online or mid-abort: rescan everything. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 * authentication.
			 */
			if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
				    ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/*
 * qla2x00_configure_n2n_loop() - kick login for the N2N (point-to-point)
 * peer. Succeeds as soon as a peer fcport (n2n_flag set) is handed to the
 * login state machine; otherwise bumps the scan retry counter and, while
 * retries remain, schedules another loop resync.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	fc_port_t *fcport;

	ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);
			return QLA_SUCCESS;
		}
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}
	return QLA_FUNCTION_FAILED;
}

/*
qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int		rval, rval2;
	int		found_devs;
	int		found;
	fc_port_t	*fcport, *new_fcport;
	uint16_t	index;
	uint16_t	entries;
	struct gid_list_info *gid;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	/* An empty ID list is retried a bounded number of times. */
	if (entries == 0) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark every known port unseen; rediscovery below flips to FOUND. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		/* 2100/2200 firmware reports an 8-bit loop ID. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		/* Entry size varies per ISP type; step by byte count. */
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		/* Session list is walked/modified under tgt.sess_lock. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known port: refresh its identity from the new data. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			if (fcport->login_retry == 0) {
				fcport->login_retry = vha->hw->login_retry_count;
				ql_dbg(ql_dbg_disc, vha, 0x2135,
				    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
				    fcport->port_name, fcport->loop_id,
				    fcport->login_retry);
			}
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			/* Drop the lock across the GFP_KERNEL allocation. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Second pass: reap ports that vanished, log in ports found above. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	/* Release the unused spare fcport from the last loop iteration. */
	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}

/*
 * qla2x00_iidma_fcport
 *	Program the firmware's iIDMA (intelligent interleaved DMA) port
 *	speed for an online remote port, capped by the HBA link rate.
 *	No-op unless the HBA is iIDMA capable and GPSC is supported.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
	}
}

/* Worker body: apply iIDMA speed and FCP priority for one fcport. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}

/* Queue qla_do_iidma_work() to run from the driver work queue. */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Already registered/online: nothing to do. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Publish the back-pointer under the SCSI host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate driver port type into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |=
FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	/* Tell the FC transport which roles this remote port has. */
	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
}

/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0 - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Well-known/reserved addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	/* Local-loop (NL) ports are not logged out on delete. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	/* Point-to-point topologies keep the existing N-Port handle. */
	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the transport and/or target stack per host mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	if (NVME_TARGET(vha->hw, fcport))
		qla_nvme_register_remote(vha, fcport);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			/* Port ID changed: re-fetch the fabric port name. */
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			/* Otherwise query the port speed (GPSC). */
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}

/*
 * Workqueue function: register one fcport, then re-evaluate it if an
 * RSCN arrived while registration was in flight (rscn_gen changed).
 */
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	/* Snapshot the RSCN generation before the (long) registration. */
	u32 rscn_gen = fcport->rscn_gen;
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
	    "%s rscn gen %d/%d next DS %d\n", __func__,
	    rscn_gen, fcport->rscn_gen, fcport->next_disc_state);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case
DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}

/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		/* No switch: not an error, just no fabric to configure. */
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* do/while(0) so "break" can short-circuit the sequence below. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Register FC-4 types/features and names with the switch. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *	Walk the fabric name server (GID_PT, falling back to GA_NXT) and
 *	reconcile the discovered ports with the driver's fcport list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
*/
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		/* Each failed query clears swl so the GA_NXT path runs. */
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/* Abort the scan if the loop went down or is transitioning. */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			/* Consume the next GID_PT entry. */
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 marks the last entry in the list. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		/* fcport list is reconciled under tgt.sess_lock. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found && NVME_TARGET(vha->hw, fcport)) {
			/* Re-drive discovery for an NVMe port mid-deletion. */
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Release the unused spare fcport from the last loop iteration. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}

/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
	int	loop_id = FC_NO_LOOP_ID;
	int	lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* Physical port always gets the fixed management-server handle. */
	if (vha->vp_idx == 0) {
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;
	}

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		/* Claim the first free loop ID below the fixed handle. */
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);
			loop_id = lid;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return loop_id;
}

/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Loop until login succeeds or an unrecoverable status is seen. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID. The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0: initiator; BIT_1: FCP-2 capable target. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] reports supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Repeat while a resync keeps being requested. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */
int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync at a time; the bit doubles as a mutex. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/*
 * qla2x00_update_fcports
 *	Delete deferred rports on every vport of this adapter.  The vport
 *	lock is dropped around each qla2x00_rport_del() call, which may
 *	block; vref_count pins the vport across that window.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha, *tvp;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* Read presence/partition info from the appropriate register set. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* Each partition-info word holds eight 4-bit class-type nibbles. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* Not in word 1; check functions 8-15 in word 2. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
6782 * However consider only valid physical fcoe function numbers (0-15). 6783 */ 6784 drv_presence_mask = ~((1 << (ha->portnum)) | 6785 ((fcoe_other_function == 0xffff) ? 6786 0 : (1 << (fcoe_other_function)))); 6787 6788 /* We are the reset owner iff: 6789 * - No other protocol drivers present. 6790 * - This is the lowest among fcoe functions. */ 6791 if (!(drv_presence & drv_presence_mask) && 6792 (ha->portnum < fcoe_other_function)) { 6793 ql_dbg(ql_dbg_p3p, vha, 0xb07f, 6794 "This host is Reset owner.\n"); 6795 ha->flags.nic_core_reset_owner = 1; 6796 } 6797 } 6798 6799 static int 6800 __qla83xx_set_drv_ack(scsi_qla_host_t *vha) 6801 { 6802 int rval = QLA_SUCCESS; 6803 struct qla_hw_data *ha = vha->hw; 6804 uint32_t drv_ack; 6805 6806 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6807 if (rval == QLA_SUCCESS) { 6808 drv_ack |= (1 << ha->portnum); 6809 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); 6810 } 6811 6812 return rval; 6813 } 6814 6815 static int 6816 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha) 6817 { 6818 int rval = QLA_SUCCESS; 6819 struct qla_hw_data *ha = vha->hw; 6820 uint32_t drv_ack; 6821 6822 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6823 if (rval == QLA_SUCCESS) { 6824 drv_ack &= ~(1 << ha->portnum); 6825 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); 6826 } 6827 6828 return rval; 6829 } 6830 6831 /* Assumes idc-lock always held on entry */ 6832 void 6833 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) 6834 { 6835 struct qla_hw_data *ha = vha->hw; 6836 uint32_t idc_audit_reg = 0, duration_secs = 0; 6837 6838 switch (audit_type) { 6839 case IDC_AUDIT_TIMESTAMP: 6840 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); 6841 idc_audit_reg = (ha->portnum) | 6842 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); 6843 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 6844 break; 6845 6846 case IDC_AUDIT_COMPLETION: 6847 duration_secs = ((jiffies_to_msecs(jiffies) - 
6848 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); 6849 idc_audit_reg = (ha->portnum) | 6850 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); 6851 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 6852 break; 6853 6854 default: 6855 ql_log(ql_log_warn, vha, 0xb078, 6856 "Invalid audit type specified.\n"); 6857 break; 6858 } 6859 } 6860 6861 /* Assumes idc_lock always held on entry */ 6862 static int 6863 qla83xx_initiating_reset(scsi_qla_host_t *vha) 6864 { 6865 struct qla_hw_data *ha = vha->hw; 6866 uint32_t idc_control, dev_state; 6867 6868 __qla83xx_get_idc_control(vha, &idc_control); 6869 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { 6870 ql_log(ql_log_info, vha, 0xb080, 6871 "NIC Core reset has been disabled. idc-control=0x%x\n", 6872 idc_control); 6873 return QLA_FUNCTION_FAILED; 6874 } 6875 6876 /* Set NEED-RESET iff in READY state and we are the reset-owner */ 6877 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6878 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { 6879 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 6880 QLA8XXX_DEV_NEED_RESET); 6881 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); 6882 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); 6883 } else { 6884 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", 6885 qdev_state(dev_state)); 6886 6887 /* SV: XXX: Is timeout required here? 
*/ 6888 /* Wait for IDC state change READY -> NEED_RESET */ 6889 while (dev_state == QLA8XXX_DEV_READY) { 6890 qla83xx_idc_unlock(vha, 0); 6891 msleep(200); 6892 qla83xx_idc_lock(vha, 0); 6893 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6894 } 6895 } 6896 6897 /* Send IDC ack by writing to drv-ack register */ 6898 __qla83xx_set_drv_ack(vha); 6899 6900 return QLA_SUCCESS; 6901 } 6902 6903 int 6904 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) 6905 { 6906 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6907 } 6908 6909 int 6910 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) 6911 { 6912 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6913 } 6914 6915 static int 6916 qla83xx_check_driver_presence(scsi_qla_host_t *vha) 6917 { 6918 uint32_t drv_presence = 0; 6919 struct qla_hw_data *ha = vha->hw; 6920 6921 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6922 if (drv_presence & (1 << ha->portnum)) 6923 return QLA_SUCCESS; 6924 else 6925 return QLA_TEST_FAILED; 6926 } 6927 6928 int 6929 qla83xx_nic_core_reset(scsi_qla_host_t *vha) 6930 { 6931 int rval = QLA_SUCCESS; 6932 struct qla_hw_data *ha = vha->hw; 6933 6934 ql_dbg(ql_dbg_p3p, vha, 0xb058, 6935 "Entered %s().\n", __func__); 6936 6937 if (vha->device_flags & DFLG_DEV_FAILED) { 6938 ql_log(ql_log_warn, vha, 0xb059, 6939 "Device in unrecoverable FAILED state.\n"); 6940 return QLA_FUNCTION_FAILED; 6941 } 6942 6943 qla83xx_idc_lock(vha, 0); 6944 6945 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { 6946 ql_log(ql_log_warn, vha, 0xb05a, 6947 "Function=0x%x has been removed from IDC participation.\n", 6948 ha->portnum); 6949 rval = QLA_FUNCTION_FAILED; 6950 goto exit; 6951 } 6952 6953 qla83xx_reset_ownership(vha); 6954 6955 rval = qla83xx_initiating_reset(vha); 6956 6957 /* 6958 * Perform reset if we are the reset-owner, 6959 * else wait till IDC state changes to READY/FAILED. 
6960 */ 6961 if (rval == QLA_SUCCESS) { 6962 rval = qla83xx_idc_state_handler(vha); 6963 6964 if (rval == QLA_SUCCESS) 6965 ha->flags.nic_core_hung = 0; 6966 __qla83xx_clear_drv_ack(vha); 6967 } 6968 6969 exit: 6970 qla83xx_idc_unlock(vha, 0); 6971 6972 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); 6973 6974 return rval; 6975 } 6976 6977 int 6978 qla2xxx_mctp_dump(scsi_qla_host_t *vha) 6979 { 6980 struct qla_hw_data *ha = vha->hw; 6981 int rval = QLA_FUNCTION_FAILED; 6982 6983 if (!IS_MCTP_CAPABLE(ha)) { 6984 /* This message can be removed from the final version */ 6985 ql_log(ql_log_info, vha, 0x506d, 6986 "This board is not MCTP capable\n"); 6987 return rval; 6988 } 6989 6990 if (!ha->mctp_dump) { 6991 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, 6992 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); 6993 6994 if (!ha->mctp_dump) { 6995 ql_log(ql_log_warn, vha, 0x506e, 6996 "Failed to allocate memory for mctp dump\n"); 6997 return rval; 6998 } 6999 } 7000 7001 #define MCTP_DUMP_STR_ADDR 0x00000000 7002 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, 7003 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); 7004 if (rval != QLA_SUCCESS) { 7005 ql_log(ql_log_warn, vha, 0x506f, 7006 "Failed to capture mctp dump\n"); 7007 } else { 7008 ql_log(ql_log_info, vha, 0x5070, 7009 "Mctp dump capture for host (%ld/%p).\n", 7010 vha->host_no, ha->mctp_dump); 7011 ha->mctp_dumped = 1; 7012 } 7013 7014 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { 7015 ha->flags.nic_core_reset_hdlr_active = 1; 7016 rval = qla83xx_restart_nic_firmware(vha); 7017 if (rval) 7018 /* NIC Core reset failed. 
*/ 7019 ql_log(ql_log_warn, vha, 0x5071, 7020 "Failed to restart nic firmware\n"); 7021 else 7022 ql_dbg(ql_dbg_p3p, vha, 0xb084, 7023 "Restarted NIC firmware successfully.\n"); 7024 ha->flags.nic_core_reset_hdlr_active = 0; 7025 } 7026 7027 return rval; 7028 7029 } 7030 7031 /* 7032 * qla2x00_quiesce_io 7033 * Description: This function will block the new I/Os 7034 * Its not aborting any I/Os as context 7035 * is not destroyed during quiescence 7036 * Arguments: scsi_qla_host_t 7037 * return : void 7038 */ 7039 void 7040 qla2x00_quiesce_io(scsi_qla_host_t *vha) 7041 { 7042 struct qla_hw_data *ha = vha->hw; 7043 struct scsi_qla_host *vp, *tvp; 7044 unsigned long flags; 7045 7046 ql_dbg(ql_dbg_dpc, vha, 0x401d, 7047 "Quiescing I/O - ha=%p.\n", ha); 7048 7049 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 7050 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 7051 atomic_set(&vha->loop_state, LOOP_DOWN); 7052 qla2x00_mark_all_devices_lost(vha); 7053 7054 spin_lock_irqsave(&ha->vport_slock, flags); 7055 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7056 atomic_inc(&vp->vref_count); 7057 spin_unlock_irqrestore(&ha->vport_slock, flags); 7058 7059 qla2x00_mark_all_devices_lost(vp); 7060 7061 spin_lock_irqsave(&ha->vport_slock, flags); 7062 atomic_dec(&vp->vref_count); 7063 } 7064 spin_unlock_irqrestore(&ha->vport_slock, flags); 7065 } else { 7066 if (!atomic_read(&vha->loop_down_timer)) 7067 atomic_set(&vha->loop_down_timer, 7068 LOOP_DOWN_TIME); 7069 } 7070 /* Wait for pending cmds to complete */ 7071 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) 7072 != QLA_SUCCESS); 7073 } 7074 7075 void 7076 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) 7077 { 7078 struct qla_hw_data *ha = vha->hw; 7079 struct scsi_qla_host *vp, *tvp; 7080 unsigned long flags; 7081 fc_port_t *fcport; 7082 u16 i; 7083 7084 /* For ISP82XX, driver waits for completion of the commands. 7085 * online flag should be set. 
7086 */ 7087 if (!(IS_P3P_TYPE(ha))) 7088 vha->flags.online = 0; 7089 ha->flags.chip_reset_done = 0; 7090 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 7091 vha->qla_stats.total_isp_aborts++; 7092 7093 ql_log(ql_log_info, vha, 0x00af, 7094 "Performing ISP error recovery - ha=%p.\n", ha); 7095 7096 ha->flags.purge_mbox = 1; 7097 /* For ISP82XX, reset_chip is just disabling interrupts. 7098 * Driver waits for the completion of the commands. 7099 * the interrupts need to be enabled. 7100 */ 7101 if (!(IS_P3P_TYPE(ha))) 7102 ha->isp_ops->reset_chip(vha); 7103 7104 ha->link_data_rate = PORT_SPEED_UNKNOWN; 7105 SAVE_TOPO(ha); 7106 ha->flags.rida_fmt2 = 0; 7107 ha->flags.n2n_ae = 0; 7108 ha->flags.lip_ae = 0; 7109 ha->current_topology = 0; 7110 QLA_FW_STOPPED(ha); 7111 ha->flags.fw_init_done = 0; 7112 ha->chip_reset++; 7113 ha->base_qpair->chip_reset = ha->chip_reset; 7114 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; 7115 ha->base_qpair->prev_completion_cnt = 0; 7116 for (i = 0; i < ha->max_qpairs; i++) { 7117 if (ha->queue_pair_map[i]) { 7118 ha->queue_pair_map[i]->chip_reset = 7119 ha->base_qpair->chip_reset; 7120 ha->queue_pair_map[i]->cmd_cnt = 7121 ha->queue_pair_map[i]->cmd_completion_cnt = 0; 7122 ha->base_qpair->prev_completion_cnt = 0; 7123 } 7124 } 7125 7126 /* purge MBox commands */ 7127 if (atomic_read(&ha->num_pend_mbx_stage3)) { 7128 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 7129 complete(&ha->mbx_intr_comp); 7130 } 7131 7132 i = 0; 7133 while (atomic_read(&ha->num_pend_mbx_stage3) || 7134 atomic_read(&ha->num_pend_mbx_stage2) || 7135 atomic_read(&ha->num_pend_mbx_stage1)) { 7136 msleep(20); 7137 i++; 7138 if (i > 50) 7139 break; 7140 } 7141 ha->flags.purge_mbox = 0; 7142 7143 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 7144 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 7145 atomic_set(&vha->loop_state, LOOP_DOWN); 7146 qla2x00_mark_all_devices_lost(vha); 7147 7148 spin_lock_irqsave(&ha->vport_slock, flags); 7149 
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7150 atomic_inc(&vp->vref_count); 7151 spin_unlock_irqrestore(&ha->vport_slock, flags); 7152 7153 qla2x00_mark_all_devices_lost(vp); 7154 7155 spin_lock_irqsave(&ha->vport_slock, flags); 7156 atomic_dec(&vp->vref_count); 7157 } 7158 spin_unlock_irqrestore(&ha->vport_slock, flags); 7159 } else { 7160 if (!atomic_read(&vha->loop_down_timer)) 7161 atomic_set(&vha->loop_down_timer, 7162 LOOP_DOWN_TIME); 7163 } 7164 7165 /* Clear all async request states across all VPs. */ 7166 list_for_each_entry(fcport, &vha->vp_fcports, list) { 7167 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7168 fcport->scan_state = 0; 7169 } 7170 spin_lock_irqsave(&ha->vport_slock, flags); 7171 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7172 atomic_inc(&vp->vref_count); 7173 spin_unlock_irqrestore(&ha->vport_slock, flags); 7174 7175 list_for_each_entry(fcport, &vp->vp_fcports, list) 7176 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7177 7178 spin_lock_irqsave(&ha->vport_slock, flags); 7179 atomic_dec(&vp->vref_count); 7180 } 7181 spin_unlock_irqrestore(&ha->vport_slock, flags); 7182 7183 /* Make sure for ISP 82XX IO DMA is complete */ 7184 if (IS_P3P_TYPE(ha)) { 7185 qla82xx_chip_reset_cleanup(vha); 7186 ql_log(ql_log_info, vha, 0x00b4, 7187 "Done chip reset cleanup.\n"); 7188 7189 /* Done waiting for pending commands. Reset online flag */ 7190 vha->flags.online = 0; 7191 } 7192 7193 /* Requeue all commands in outstanding command list. */ 7194 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 7195 /* memory barrier */ 7196 wmb(); 7197 } 7198 7199 /* 7200 * qla2x00_abort_isp 7201 * Resets ISP and aborts all outstanding commands. 7202 * 7203 * Input: 7204 * ha = adapter block pointer. 
7205 * 7206 * Returns: 7207 * 0 = success 7208 */ 7209 int 7210 qla2x00_abort_isp(scsi_qla_host_t *vha) 7211 { 7212 int rval; 7213 uint8_t status = 0; 7214 struct qla_hw_data *ha = vha->hw; 7215 struct scsi_qla_host *vp, *tvp; 7216 struct req_que *req = ha->req_q_map[0]; 7217 unsigned long flags; 7218 7219 if (vha->flags.online) { 7220 qla2x00_abort_isp_cleanup(vha); 7221 7222 vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS; 7223 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; 7224 7225 if (vha->hw->flags.port_isolated) 7226 return status; 7227 7228 if (qla2x00_isp_reg_stat(ha)) { 7229 ql_log(ql_log_info, vha, 0x803f, 7230 "ISP Abort - ISP reg disconnect, exiting.\n"); 7231 return status; 7232 } 7233 7234 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { 7235 ha->flags.chip_reset_done = 1; 7236 vha->flags.online = 1; 7237 status = 0; 7238 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7239 return status; 7240 } 7241 7242 if (IS_QLA8031(ha)) { 7243 ql_dbg(ql_dbg_p3p, vha, 0xb05c, 7244 "Clearing fcoe driver presence.\n"); 7245 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) 7246 ql_dbg(ql_dbg_p3p, vha, 0xb073, 7247 "Error while clearing DRV-Presence.\n"); 7248 } 7249 7250 if (unlikely(pci_channel_offline(ha->pdev) && 7251 ha->flags.pci_channel_io_perm_failure)) { 7252 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7253 status = 0; 7254 return status; 7255 } 7256 7257 switch (vha->qlini_mode) { 7258 case QLA2XXX_INI_MODE_DISABLED: 7259 if (!qla_tgt_mode_enabled(vha)) 7260 return 0; 7261 break; 7262 case QLA2XXX_INI_MODE_DUAL: 7263 if (!qla_dual_mode_enabled(vha) && 7264 !qla_ini_mode_enabled(vha)) 7265 return 0; 7266 break; 7267 case QLA2XXX_INI_MODE_ENABLED: 7268 default: 7269 break; 7270 } 7271 7272 ha->isp_ops->get_flash_version(vha, req->ring); 7273 7274 if (qla2x00_isp_reg_stat(ha)) { 7275 ql_log(ql_log_info, vha, 0x803f, 7276 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); 7277 return status; 7278 } 7279 
ha->isp_ops->nvram_config(vha); 7280 7281 if (qla2x00_isp_reg_stat(ha)) { 7282 ql_log(ql_log_info, vha, 0x803f, 7283 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); 7284 return status; 7285 } 7286 if (!qla2x00_restart_isp(vha)) { 7287 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7288 7289 if (!atomic_read(&vha->loop_down_timer)) { 7290 /* 7291 * Issue marker command only when we are going 7292 * to start the I/O . 7293 */ 7294 vha->marker_needed = 1; 7295 } 7296 7297 vha->flags.online = 1; 7298 7299 ha->isp_ops->enable_intrs(ha); 7300 7301 ha->isp_abort_cnt = 0; 7302 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7303 7304 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) 7305 qla2x00_get_fw_version(vha); 7306 if (ha->fce) { 7307 ha->flags.fce_enabled = 1; 7308 memset(ha->fce, 0, 7309 fce_calc_size(ha->fce_bufs)); 7310 rval = qla2x00_enable_fce_trace(vha, 7311 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 7312 &ha->fce_bufs); 7313 if (rval) { 7314 ql_log(ql_log_warn, vha, 0x8033, 7315 "Unable to reinitialize FCE " 7316 "(%d).\n", rval); 7317 ha->flags.fce_enabled = 0; 7318 } 7319 } 7320 7321 if (ha->eft) { 7322 memset(ha->eft, 0, EFT_SIZE); 7323 rval = qla2x00_enable_eft_trace(vha, 7324 ha->eft_dma, EFT_NUM_BUFFERS); 7325 if (rval) { 7326 ql_log(ql_log_warn, vha, 0x8034, 7327 "Unable to reinitialize EFT " 7328 "(%d).\n", rval); 7329 } 7330 } 7331 } else { /* failed the ISP abort */ 7332 vha->flags.online = 1; 7333 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 7334 if (ha->isp_abort_cnt == 0) { 7335 ql_log(ql_log_fatal, vha, 0x8035, 7336 "ISP error recover failed - " 7337 "board disabled.\n"); 7338 /* 7339 * The next call disables the board 7340 * completely. 
7341 */ 7342 qla2x00_abort_isp_cleanup(vha); 7343 vha->flags.online = 0; 7344 clear_bit(ISP_ABORT_RETRY, 7345 &vha->dpc_flags); 7346 status = 0; 7347 } else { /* schedule another ISP abort */ 7348 ha->isp_abort_cnt--; 7349 ql_dbg(ql_dbg_taskm, vha, 0x8020, 7350 "ISP abort - retry remaining %d.\n", 7351 ha->isp_abort_cnt); 7352 status = 1; 7353 } 7354 } else { 7355 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 7356 ql_dbg(ql_dbg_taskm, vha, 0x8021, 7357 "ISP error recovery - retrying (%d) " 7358 "more times.\n", ha->isp_abort_cnt); 7359 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7360 status = 1; 7361 } 7362 } 7363 7364 } 7365 7366 if (vha->hw->flags.port_isolated) { 7367 qla2x00_abort_isp_cleanup(vha); 7368 return status; 7369 } 7370 7371 if (!status) { 7372 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); 7373 qla2x00_configure_hba(vha); 7374 spin_lock_irqsave(&ha->vport_slock, flags); 7375 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7376 if (vp->vp_idx) { 7377 atomic_inc(&vp->vref_count); 7378 spin_unlock_irqrestore(&ha->vport_slock, flags); 7379 7380 qla2x00_vp_abort_isp(vp); 7381 7382 spin_lock_irqsave(&ha->vport_slock, flags); 7383 atomic_dec(&vp->vref_count); 7384 } 7385 } 7386 spin_unlock_irqrestore(&ha->vport_slock, flags); 7387 7388 if (IS_QLA8031(ha)) { 7389 ql_dbg(ql_dbg_p3p, vha, 0xb05d, 7390 "Setting back fcoe driver presence.\n"); 7391 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS) 7392 ql_dbg(ql_dbg_p3p, vha, 0xb074, 7393 "Error while setting DRV-Presence.\n"); 7394 } 7395 } else { 7396 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", 7397 __func__); 7398 } 7399 7400 return(status); 7401 } 7402 7403 /* 7404 * qla2x00_restart_isp 7405 * restarts the ISP after a reset 7406 * 7407 * Input: 7408 * ha = adapter block pointer. 
7409 * 7410 * Returns: 7411 * 0 = success 7412 */ 7413 static int 7414 qla2x00_restart_isp(scsi_qla_host_t *vha) 7415 { 7416 int status; 7417 struct qla_hw_data *ha = vha->hw; 7418 7419 /* If firmware needs to be loaded */ 7420 if (qla2x00_isp_firmware(vha)) { 7421 vha->flags.online = 0; 7422 status = ha->isp_ops->chip_diag(vha); 7423 if (status) 7424 return status; 7425 status = qla2x00_setup_chip(vha); 7426 if (status) 7427 return status; 7428 } 7429 7430 status = qla2x00_init_rings(vha); 7431 if (status) 7432 return status; 7433 7434 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7435 ha->flags.chip_reset_done = 1; 7436 7437 /* Initialize the queues in use */ 7438 qla25xx_init_queues(ha); 7439 7440 status = qla2x00_fw_ready(vha); 7441 if (status) { 7442 /* if no cable then assume it's good */ 7443 return vha->device_flags & DFLG_NO_CABLE ? 0 : status; 7444 } 7445 7446 /* Issue a marker after FW becomes ready. */ 7447 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); 7448 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 7449 7450 return 0; 7451 } 7452 7453 static int 7454 qla25xx_init_queues(struct qla_hw_data *ha) 7455 { 7456 struct rsp_que *rsp = NULL; 7457 struct req_que *req = NULL; 7458 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 7459 int ret = -1; 7460 int i; 7461 7462 for (i = 1; i < ha->max_rsp_queues; i++) { 7463 rsp = ha->rsp_q_map[i]; 7464 if (rsp && test_bit(i, ha->rsp_qid_map)) { 7465 rsp->options &= ~BIT_0; 7466 ret = qla25xx_init_rsp_que(base_vha, rsp); 7467 if (ret != QLA_SUCCESS) 7468 ql_dbg(ql_dbg_init, base_vha, 0x00ff, 7469 "%s Rsp que: %d init failed.\n", 7470 __func__, rsp->id); 7471 else 7472 ql_dbg(ql_dbg_init, base_vha, 0x0100, 7473 "%s Rsp que: %d inited.\n", 7474 __func__, rsp->id); 7475 } 7476 } 7477 for (i = 1; i < ha->max_req_queues; i++) { 7478 req = ha->req_q_map[i]; 7479 if (req && test_bit(i, ha->req_qid_map)) { 7480 /* Clear outstanding commands array. 
*/ 7481 req->options &= ~BIT_0; 7482 ret = qla25xx_init_req_que(base_vha, req); 7483 if (ret != QLA_SUCCESS) 7484 ql_dbg(ql_dbg_init, base_vha, 0x0101, 7485 "%s Req que: %d init failed.\n", 7486 __func__, req->id); 7487 else 7488 ql_dbg(ql_dbg_init, base_vha, 0x0102, 7489 "%s Req que: %d inited.\n", 7490 __func__, req->id); 7491 } 7492 } 7493 return ret; 7494 } 7495 7496 /* 7497 * qla2x00_reset_adapter 7498 * Reset adapter. 7499 * 7500 * Input: 7501 * ha = adapter block pointer. 7502 */ 7503 int 7504 qla2x00_reset_adapter(scsi_qla_host_t *vha) 7505 { 7506 unsigned long flags = 0; 7507 struct qla_hw_data *ha = vha->hw; 7508 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 7509 7510 vha->flags.online = 0; 7511 ha->isp_ops->disable_intrs(ha); 7512 7513 spin_lock_irqsave(&ha->hardware_lock, flags); 7514 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 7515 rd_reg_word(®->hccr); /* PCI Posting. */ 7516 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 7517 rd_reg_word(®->hccr); /* PCI Posting. */ 7518 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7519 7520 return QLA_SUCCESS; 7521 } 7522 7523 int 7524 qla24xx_reset_adapter(scsi_qla_host_t *vha) 7525 { 7526 unsigned long flags = 0; 7527 struct qla_hw_data *ha = vha->hw; 7528 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 7529 7530 if (IS_P3P_TYPE(ha)) 7531 return QLA_SUCCESS; 7532 7533 vha->flags.online = 0; 7534 ha->isp_ops->disable_intrs(ha); 7535 7536 spin_lock_irqsave(&ha->hardware_lock, flags); 7537 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); 7538 rd_reg_dword(®->hccr); 7539 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); 7540 rd_reg_dword(®->hccr); 7541 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7542 7543 if (IS_NOPOLLING_TYPE(ha)) 7544 ha->isp_ops->enable_intrs(ha); 7545 7546 return QLA_SUCCESS; 7547 } 7548 7549 /* On sparc systems, obtain port and node WWN from firmware 7550 * properties. 
7551 */ 7552 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 7553 struct nvram_24xx *nv) 7554 { 7555 #ifdef CONFIG_SPARC 7556 struct qla_hw_data *ha = vha->hw; 7557 struct pci_dev *pdev = ha->pdev; 7558 struct device_node *dp = pci_device_to_OF_node(pdev); 7559 const u8 *val; 7560 int len; 7561 7562 val = of_get_property(dp, "port-wwn", &len); 7563 if (val && len >= WWN_SIZE) 7564 memcpy(nv->port_name, val, WWN_SIZE); 7565 7566 val = of_get_property(dp, "node-wwn", &len); 7567 if (val && len >= WWN_SIZE) 7568 memcpy(nv->node_name, val, WWN_SIZE); 7569 #endif 7570 } 7571 7572 int 7573 qla24xx_nvram_config(scsi_qla_host_t *vha) 7574 { 7575 int rval; 7576 struct init_cb_24xx *icb; 7577 struct nvram_24xx *nv; 7578 __le32 *dptr; 7579 uint8_t *dptr1, *dptr2; 7580 uint32_t chksum; 7581 uint16_t cnt; 7582 struct qla_hw_data *ha = vha->hw; 7583 7584 rval = QLA_SUCCESS; 7585 icb = (struct init_cb_24xx *)ha->init_cb; 7586 nv = ha->nvram; 7587 7588 /* Determine NVRAM starting address. */ 7589 if (ha->port_no == 0) { 7590 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 7591 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 7592 } else { 7593 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 7594 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 7595 } 7596 7597 ha->nvram_size = sizeof(*nv); 7598 ha->vpd_size = FA_NVRAM_VPD_SIZE; 7599 7600 /* Get VPD data into cache */ 7601 ha->vpd = ha->nvram + VPD_OFFSET; 7602 ha->isp_ops->read_nvram(vha, ha->vpd, 7603 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 7604 7605 /* Get NVRAM data into cache and calculate checksum. */ 7606 dptr = (__force __le32 *)nv; 7607 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); 7608 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 7609 chksum += le32_to_cpu(*dptr); 7610 7611 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, 7612 "Contents of NVRAM\n"); 7613 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, 7614 nv, ha->nvram_size); 7615 7616 /* Bad NVRAM data, set defaults parameters. 
*/ 7617 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || 7618 le16_to_cpu(nv->nvram_version) < ICB_VERSION) { 7619 /* Reset NVRAM data. */ 7620 ql_log(ql_log_warn, vha, 0x006b, 7621 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", 7622 chksum, nv->id, nv->nvram_version); 7623 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv)); 7624 ql_log(ql_log_warn, vha, 0x006c, 7625 "Falling back to functioning (yet invalid -- WWPN) " 7626 "defaults.\n"); 7627 7628 /* 7629 * Set default initialization control block. 7630 */ 7631 memset(nv, 0, ha->nvram_size); 7632 nv->nvram_version = cpu_to_le16(ICB_VERSION); 7633 nv->version = cpu_to_le16(ICB_VERSION); 7634 nv->frame_payload_size = cpu_to_le16(2048); 7635 nv->execution_throttle = cpu_to_le16(0xFFFF); 7636 nv->exchange_count = cpu_to_le16(0); 7637 nv->hard_address = cpu_to_le16(124); 7638 nv->port_name[0] = 0x21; 7639 nv->port_name[1] = 0x00 + ha->port_no + 1; 7640 nv->port_name[2] = 0x00; 7641 nv->port_name[3] = 0xe0; 7642 nv->port_name[4] = 0x8b; 7643 nv->port_name[5] = 0x1c; 7644 nv->port_name[6] = 0x55; 7645 nv->port_name[7] = 0x86; 7646 nv->node_name[0] = 0x20; 7647 nv->node_name[1] = 0x00; 7648 nv->node_name[2] = 0x00; 7649 nv->node_name[3] = 0xe0; 7650 nv->node_name[4] = 0x8b; 7651 nv->node_name[5] = 0x1c; 7652 nv->node_name[6] = 0x55; 7653 nv->node_name[7] = 0x86; 7654 qla24xx_nvram_wwn_from_ofw(vha, nv); 7655 nv->login_retry_count = cpu_to_le16(8); 7656 nv->interrupt_delay_timer = cpu_to_le16(0); 7657 nv->login_timeout = cpu_to_le16(0); 7658 nv->firmware_options_1 = 7659 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 7660 nv->firmware_options_2 = cpu_to_le32(2 << 4); 7661 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 7662 nv->firmware_options_3 = cpu_to_le32(2 << 13); 7663 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 7664 nv->efi_parameters = cpu_to_le32(0); 7665 nv->reset_delay = 5; 7666 nv->max_luns_per_target = cpu_to_le16(128); 7667 nv->port_down_retry_count = cpu_to_le16(30); 7668 nv->link_down_timeout 
= cpu_to_le16(30); 7669 7670 rval = 1; 7671 } 7672 7673 if (qla_tgt_mode_enabled(vha)) { 7674 /* Don't enable full login after initial LIP */ 7675 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 7676 /* Don't enable LIP full login for initiator */ 7677 nv->host_p &= cpu_to_le32(~BIT_10); 7678 } 7679 7680 qlt_24xx_config_nvram_stage1(vha, nv); 7681 7682 /* Reset Initialization control block */ 7683 memset(icb, 0, ha->init_cb_size); 7684 7685 /* Copy 1st segment. */ 7686 dptr1 = (uint8_t *)icb; 7687 dptr2 = (uint8_t *)&nv->version; 7688 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 7689 while (cnt--) 7690 *dptr1++ = *dptr2++; 7691 7692 icb->login_retry_count = nv->login_retry_count; 7693 icb->link_down_on_nos = nv->link_down_on_nos; 7694 7695 /* Copy 2nd segment. */ 7696 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 7697 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 7698 cnt = (uint8_t *)&icb->reserved_3 - 7699 (uint8_t *)&icb->interrupt_delay_timer; 7700 while (cnt--) 7701 *dptr1++ = *dptr2++; 7702 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); 7703 /* 7704 * Setup driver NVRAM options. 7705 */ 7706 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 7707 "QLA2462"); 7708 7709 qlt_24xx_config_nvram_stage2(vha, icb); 7710 7711 if (nv->host_p & cpu_to_le32(BIT_15)) { 7712 /* Use alternate WWN? */ 7713 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 7714 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 7715 } 7716 7717 /* Prepare nodename */ 7718 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 7719 /* 7720 * Firmware will apply the following mask if the nodename was 7721 * not provided. 7722 */ 7723 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 7724 icb->node_name[0] &= 0xF0; 7725 } 7726 7727 /* Set host adapter parameters. 
*/ 7728 ha->flags.disable_risc_code_load = 0; 7729 ha->flags.enable_lip_reset = 0; 7730 ha->flags.enable_lip_full_login = 7731 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; 7732 ha->flags.enable_target_reset = 7733 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; 7734 ha->flags.enable_led_scheme = 0; 7735 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; 7736 7737 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 7738 (BIT_6 | BIT_5 | BIT_4)) >> 4; 7739 7740 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 7741 sizeof(ha->fw_seriallink_options24)); 7742 7743 /* save HBA serial number */ 7744 ha->serial0 = icb->port_name[5]; 7745 ha->serial1 = icb->port_name[6]; 7746 ha->serial2 = icb->port_name[7]; 7747 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 7748 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 7749 7750 icb->execution_throttle = cpu_to_le16(0xFFFF); 7751 7752 ha->retry_count = le16_to_cpu(nv->login_retry_count); 7753 7754 /* Set minimum login_timeout to 4 seconds. */ 7755 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 7756 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 7757 if (le16_to_cpu(nv->login_timeout) < 4) 7758 nv->login_timeout = cpu_to_le16(4); 7759 ha->login_timeout = le16_to_cpu(nv->login_timeout); 7760 7761 /* Set minimum RATOV to 100 tenths of a second. */ 7762 ha->r_a_tov = 100; 7763 7764 ha->loop_reset_delay = nv->reset_delay; 7765 7766 /* Link Down Timeout = 0: 7767 * 7768 * When Port Down timer expires we will start returning 7769 * I/O's to OS with "DID_NO_CONNECT". 7770 * 7771 * Link Down Timeout != 0: 7772 * 7773 * The driver waits for the link to come up after link down 7774 * before returning I/Os to OS with "DID_NO_CONNECT". 
7775 */ 7776 if (le16_to_cpu(nv->link_down_timeout) == 0) { 7777 ha->loop_down_abort_time = 7778 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 7779 } else { 7780 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 7781 ha->loop_down_abort_time = 7782 (LOOP_DOWN_TIME - ha->link_down_timeout); 7783 } 7784 7785 /* Need enough time to try and get the port back. */ 7786 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 7787 if (qlport_down_retry) 7788 ha->port_down_retry_count = qlport_down_retry; 7789 7790 /* Set login_retry_count */ 7791 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 7792 if (ha->port_down_retry_count == 7793 le16_to_cpu(nv->port_down_retry_count) && 7794 ha->port_down_retry_count > 3) 7795 ha->login_retry_count = ha->port_down_retry_count; 7796 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 7797 ha->login_retry_count = ha->port_down_retry_count; 7798 if (ql2xloginretrycount) 7799 ha->login_retry_count = ql2xloginretrycount; 7800 7801 /* N2N: driver will initiate Login instead of FW */ 7802 icb->firmware_options_3 |= cpu_to_le32(BIT_8); 7803 7804 /* Enable ZIO. */ 7805 if (!vha->flags.init_done) { 7806 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 7807 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 7808 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
7809 le16_to_cpu(icb->interrupt_delay_timer) : 2; 7810 } 7811 icb->firmware_options_2 &= cpu_to_le32( 7812 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 7813 if (ha->zio_mode != QLA_ZIO_DISABLED) { 7814 ha->zio_mode = QLA_ZIO_MODE_6; 7815 7816 ql_log(ql_log_info, vha, 0x006f, 7817 "ZIO mode %d enabled; timer delay (%d us).\n", 7818 ha->zio_mode, ha->zio_timer * 100); 7819 7820 icb->firmware_options_2 |= cpu_to_le32( 7821 (uint32_t)ha->zio_mode); 7822 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 7823 } 7824 7825 if (rval) { 7826 ql_log(ql_log_warn, vha, 0x0070, 7827 "NVRAM configuration failed.\n"); 7828 } 7829 return (rval); 7830 } 7831 7832 static void 7833 qla27xx_print_image(struct scsi_qla_host *vha, char *name, 7834 struct qla27xx_image_status *image_status) 7835 { 7836 ql_dbg(ql_dbg_init, vha, 0x018b, 7837 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n", 7838 name, "status", 7839 image_status->image_status_mask, 7840 le16_to_cpu(image_status->generation), 7841 image_status->ver_major, 7842 image_status->ver_minor, 7843 image_status->bitmap, 7844 le32_to_cpu(image_status->checksum), 7845 le32_to_cpu(image_status->signature)); 7846 } 7847 7848 static bool 7849 qla28xx_check_aux_image_status_signature( 7850 struct qla27xx_image_status *image_status) 7851 { 7852 ulong signature = le32_to_cpu(image_status->signature); 7853 7854 return signature != QLA28XX_AUX_IMG_STATUS_SIGN; 7855 } 7856 7857 static bool 7858 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status) 7859 { 7860 ulong signature = le32_to_cpu(image_status->signature); 7861 7862 return 7863 signature != QLA27XX_IMG_STATUS_SIGN && 7864 signature != QLA28XX_IMG_STATUS_SIGN; 7865 } 7866 7867 static ulong 7868 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) 7869 { 7870 __le32 *p = (__force __le32 *)image_status; 7871 uint n = sizeof(*image_status) / sizeof(*p); 7872 uint32_t sum = 0; 7873 7874 for ( ; n--; p++) 7875 sum += 
le32_to_cpup(p); 7876 7877 return sum; 7878 } 7879 7880 static inline uint 7881 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask) 7882 { 7883 return aux->bitmap & bitmask ? 7884 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE; 7885 } 7886 7887 static void 7888 qla28xx_component_status( 7889 struct active_regions *active_regions, struct qla27xx_image_status *aux) 7890 { 7891 active_regions->aux.board_config = 7892 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG); 7893 7894 active_regions->aux.vpd_nvram = 7895 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM); 7896 7897 active_regions->aux.npiv_config_0_1 = 7898 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1); 7899 7900 active_regions->aux.npiv_config_2_3 = 7901 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3); 7902 } 7903 7904 static int 7905 qla27xx_compare_image_generation( 7906 struct qla27xx_image_status *pri_image_status, 7907 struct qla27xx_image_status *sec_image_status) 7908 { 7909 /* calculate generation delta as uint16 (this accounts for wrap) */ 7910 int16_t delta = 7911 le16_to_cpu(pri_image_status->generation) - 7912 le16_to_cpu(sec_image_status->generation); 7913 7914 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta); 7915 7916 return delta; 7917 } 7918 7919 void 7920 qla28xx_get_aux_images( 7921 struct scsi_qla_host *vha, struct active_regions *active_regions) 7922 { 7923 struct qla_hw_data *ha = vha->hw; 7924 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; 7925 bool valid_pri_image = false, valid_sec_image = false; 7926 bool active_pri_image = false, active_sec_image = false; 7927 7928 if (!ha->flt_region_aux_img_status_pri) { 7929 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); 7930 goto check_sec_image; 7931 } 7932 7933 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, 7934 ha->flt_region_aux_img_status_pri, 7935 sizeof(pri_aux_image_status) >> 
2); 7936 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); 7937 7938 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { 7939 ql_dbg(ql_dbg_init, vha, 0x018b, 7940 "Primary aux image signature (%#x) not valid\n", 7941 le32_to_cpu(pri_aux_image_status.signature)); 7942 goto check_sec_image; 7943 } 7944 7945 if (qla27xx_image_status_checksum(&pri_aux_image_status)) { 7946 ql_dbg(ql_dbg_init, vha, 0x018c, 7947 "Primary aux image checksum failed\n"); 7948 goto check_sec_image; 7949 } 7950 7951 valid_pri_image = true; 7952 7953 if (pri_aux_image_status.image_status_mask & 1) { 7954 ql_dbg(ql_dbg_init, vha, 0x018d, 7955 "Primary aux image is active\n"); 7956 active_pri_image = true; 7957 } 7958 7959 check_sec_image: 7960 if (!ha->flt_region_aux_img_status_sec) { 7961 ql_dbg(ql_dbg_init, vha, 0x018a, 7962 "Secondary aux image not addressed\n"); 7963 goto check_valid_image; 7964 } 7965 7966 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, 7967 ha->flt_region_aux_img_status_sec, 7968 sizeof(sec_aux_image_status) >> 2); 7969 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); 7970 7971 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { 7972 ql_dbg(ql_dbg_init, vha, 0x018b, 7973 "Secondary aux image signature (%#x) not valid\n", 7974 le32_to_cpu(sec_aux_image_status.signature)); 7975 goto check_valid_image; 7976 } 7977 7978 if (qla27xx_image_status_checksum(&sec_aux_image_status)) { 7979 ql_dbg(ql_dbg_init, vha, 0x018c, 7980 "Secondary aux image checksum failed\n"); 7981 goto check_valid_image; 7982 } 7983 7984 valid_sec_image = true; 7985 7986 if (sec_aux_image_status.image_status_mask & 1) { 7987 ql_dbg(ql_dbg_init, vha, 0x018d, 7988 "Secondary aux image is active\n"); 7989 active_sec_image = true; 7990 } 7991 7992 check_valid_image: 7993 if (valid_pri_image && active_pri_image && 7994 valid_sec_image && active_sec_image) { 7995 if 
(qla27xx_compare_image_generation(&pri_aux_image_status, 7996 &sec_aux_image_status) >= 0) { 7997 qla28xx_component_status(active_regions, 7998 &pri_aux_image_status); 7999 } else { 8000 qla28xx_component_status(active_regions, 8001 &sec_aux_image_status); 8002 } 8003 } else if (valid_pri_image && active_pri_image) { 8004 qla28xx_component_status(active_regions, &pri_aux_image_status); 8005 } else if (valid_sec_image && active_sec_image) { 8006 qla28xx_component_status(active_regions, &sec_aux_image_status); 8007 } 8008 8009 ql_dbg(ql_dbg_init, vha, 0x018f, 8010 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n", 8011 active_regions->aux.board_config, 8012 active_regions->aux.vpd_nvram, 8013 active_regions->aux.npiv_config_0_1, 8014 active_regions->aux.npiv_config_2_3); 8015 } 8016 8017 void 8018 qla27xx_get_active_image(struct scsi_qla_host *vha, 8019 struct active_regions *active_regions) 8020 { 8021 struct qla_hw_data *ha = vha->hw; 8022 struct qla27xx_image_status pri_image_status, sec_image_status; 8023 bool valid_pri_image = false, valid_sec_image = false; 8024 bool active_pri_image = false, active_sec_image = false; 8025 8026 if (!ha->flt_region_img_status_pri) { 8027 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); 8028 goto check_sec_image; 8029 } 8030 8031 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status, 8032 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != 8033 QLA_SUCCESS) { 8034 WARN_ON_ONCE(true); 8035 goto check_sec_image; 8036 } 8037 qla27xx_print_image(vha, "Primary image", &pri_image_status); 8038 8039 if (qla27xx_check_image_status_signature(&pri_image_status)) { 8040 ql_dbg(ql_dbg_init, vha, 0x018b, 8041 "Primary image signature (%#x) not valid\n", 8042 le32_to_cpu(pri_image_status.signature)); 8043 goto check_sec_image; 8044 } 8045 8046 if (qla27xx_image_status_checksum(&pri_image_status)) { 8047 ql_dbg(ql_dbg_init, vha, 0x018c, 8048 "Primary image checksum failed\n"); 8049 goto 
check_sec_image; 8050 } 8051 8052 valid_pri_image = true; 8053 8054 if (pri_image_status.image_status_mask & 1) { 8055 ql_dbg(ql_dbg_init, vha, 0x018d, 8056 "Primary image is active\n"); 8057 active_pri_image = true; 8058 } 8059 8060 check_sec_image: 8061 if (!ha->flt_region_img_status_sec) { 8062 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); 8063 goto check_valid_image; 8064 } 8065 8066 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 8067 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); 8068 qla27xx_print_image(vha, "Secondary image", &sec_image_status); 8069 8070 if (qla27xx_check_image_status_signature(&sec_image_status)) { 8071 ql_dbg(ql_dbg_init, vha, 0x018b, 8072 "Secondary image signature (%#x) not valid\n", 8073 le32_to_cpu(sec_image_status.signature)); 8074 goto check_valid_image; 8075 } 8076 8077 if (qla27xx_image_status_checksum(&sec_image_status)) { 8078 ql_dbg(ql_dbg_init, vha, 0x018c, 8079 "Secondary image checksum failed\n"); 8080 goto check_valid_image; 8081 } 8082 8083 valid_sec_image = true; 8084 8085 if (sec_image_status.image_status_mask & 1) { 8086 ql_dbg(ql_dbg_init, vha, 0x018d, 8087 "Secondary image is active\n"); 8088 active_sec_image = true; 8089 } 8090 8091 check_valid_image: 8092 if (valid_pri_image && active_pri_image) 8093 active_regions->global = QLA27XX_PRIMARY_IMAGE; 8094 8095 if (valid_sec_image && active_sec_image) { 8096 if (!active_regions->global || 8097 qla27xx_compare_image_generation( 8098 &pri_image_status, &sec_image_status) < 0) { 8099 active_regions->global = QLA27XX_SECONDARY_IMAGE; 8100 } 8101 } 8102 8103 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", 8104 active_regions->global == QLA27XX_DEFAULT_IMAGE ? 8105 "default (boot/fw)" : 8106 active_regions->global == QLA27XX_PRIMARY_IMAGE ? 8107 "primary" : 8108 active_regions->global == QLA27XX_SECONDARY_IMAGE ? 
8109 "secondary" : "invalid", 8110 active_regions->global); 8111 } 8112 8113 bool qla24xx_risc_firmware_invalid(uint32_t *dword) 8114 { 8115 return 8116 !(dword[4] | dword[5] | dword[6] | dword[7]) || 8117 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); 8118 } 8119 8120 static int 8121 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 8122 uint32_t faddr) 8123 { 8124 int rval; 8125 uint templates, segments, fragment; 8126 ulong i; 8127 uint j; 8128 ulong dlen; 8129 uint32_t *dcode; 8130 uint32_t risc_addr, risc_size, risc_attr = 0; 8131 struct qla_hw_data *ha = vha->hw; 8132 struct req_que *req = ha->req_q_map[0]; 8133 struct fwdt *fwdt = ha->fwdt; 8134 8135 ql_dbg(ql_dbg_init, vha, 0x008b, 8136 "FW: Loading firmware from flash (%x).\n", faddr); 8137 8138 dcode = (uint32_t *)req->ring; 8139 qla24xx_read_flash_data(vha, dcode, faddr, 8); 8140 if (qla24xx_risc_firmware_invalid(dcode)) { 8141 ql_log(ql_log_fatal, vha, 0x008c, 8142 "Unable to verify the integrity of flash firmware " 8143 "image.\n"); 8144 ql_log(ql_log_fatal, vha, 0x008d, 8145 "Firmware data: %08x %08x %08x %08x.\n", 8146 dcode[0], dcode[1], dcode[2], dcode[3]); 8147 8148 return QLA_FUNCTION_FAILED; 8149 } 8150 8151 dcode = (uint32_t *)req->ring; 8152 *srisc_addr = 0; 8153 segments = FA_RISC_CODE_SEGMENTS; 8154 for (j = 0; j < segments; j++) { 8155 ql_dbg(ql_dbg_init, vha, 0x008d, 8156 "-> Loading segment %u...\n", j); 8157 qla24xx_read_flash_data(vha, dcode, faddr, 10); 8158 risc_addr = be32_to_cpu((__force __be32)dcode[2]); 8159 risc_size = be32_to_cpu((__force __be32)dcode[3]); 8160 if (!*srisc_addr) { 8161 *srisc_addr = risc_addr; 8162 risc_attr = be32_to_cpu((__force __be32)dcode[9]); 8163 } 8164 8165 dlen = ha->fw_transfer_size >> 2; 8166 for (fragment = 0; risc_size; fragment++) { 8167 if (dlen > risc_size) 8168 dlen = risc_size; 8169 8170 ql_dbg(ql_dbg_init, vha, 0x008e, 8171 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", 8172 fragment, risc_addr, faddr, dlen); 
8173 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 8174 for (i = 0; i < dlen; i++) 8175 dcode[i] = swab32(dcode[i]); 8176 8177 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); 8178 if (rval) { 8179 ql_log(ql_log_fatal, vha, 0x008f, 8180 "-> Failed load firmware fragment %u.\n", 8181 fragment); 8182 return QLA_FUNCTION_FAILED; 8183 } 8184 8185 faddr += dlen; 8186 risc_addr += dlen; 8187 risc_size -= dlen; 8188 } 8189 } 8190 8191 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 8192 return QLA_SUCCESS; 8193 8194 templates = (risc_attr & BIT_9) ? 2 : 1; 8195 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates); 8196 for (j = 0; j < templates; j++, fwdt++) { 8197 vfree(fwdt->template); 8198 fwdt->template = NULL; 8199 fwdt->length = 0; 8200 8201 dcode = (uint32_t *)req->ring; 8202 qla24xx_read_flash_data(vha, dcode, faddr, 7); 8203 risc_size = be32_to_cpu((__force __be32)dcode[2]); 8204 ql_dbg(ql_dbg_init, vha, 0x0161, 8205 "-> fwdt%u template array at %#x (%#x dwords)\n", 8206 j, faddr, risc_size); 8207 if (!risc_size || !~risc_size) { 8208 ql_dbg(ql_dbg_init, vha, 0x0162, 8209 "-> fwdt%u failed to read array\n", j); 8210 goto failed; 8211 } 8212 8213 /* skip header and ignore checksum */ 8214 faddr += 7; 8215 risc_size -= 8; 8216 8217 ql_dbg(ql_dbg_init, vha, 0x0163, 8218 "-> fwdt%u template allocate template %#x words...\n", 8219 j, risc_size); 8220 fwdt->template = vmalloc(risc_size * sizeof(*dcode)); 8221 if (!fwdt->template) { 8222 ql_log(ql_log_warn, vha, 0x0164, 8223 "-> fwdt%u failed allocate template.\n", j); 8224 goto failed; 8225 } 8226 8227 dcode = fwdt->template; 8228 qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 8229 8230 if (!qla27xx_fwdt_template_valid(dcode)) { 8231 ql_log(ql_log_warn, vha, 0x0165, 8232 "-> fwdt%u failed template validate\n", j); 8233 goto failed; 8234 } 8235 8236 dlen = qla27xx_fwdt_template_size(dcode); 8237 ql_dbg(ql_dbg_init, vha, 0x0166, 8238 "-> fwdt%u template size %#lx bytes (%#lx words)\n", 8239 j, 
		    dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/*
	 * Dump-template load is best-effort: discard any partial template
	 * but still report success -- the firmware itself loaded above.
	 */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/*
 * Load firmware for older (non-FWI2) adapters from the request_firmware()
 * blob: validates the image header, then transfers each segment listed in
 * blob->segs to RISC memory in 16-bit words via qla2x00_load_ram().
 * *srisc_addr receives the first segment's load address.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode;
	__be16 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Stage fragments in the (DMA-able) request ring buffer. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version words mean a blank/erased image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Firmware words are big-endian on the wire. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}

/*
 * Load 24xx-era firmware from the request_firmware() blob (.bin file),
 * plus the 27xx/28xx firmware-dump templates that follow the code
 * segments.  *srisc_addr receives the firmware entry address.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong i;
	uint j;
	struct fw_blob *blob;
	__be32 *fwcode;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");

		return QLA_FUNCTION_FAILED;
	}

	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	/* Stage fragments in the (DMA-able) request ring buffer. */
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			/* First segment carries the entry address + attrs. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode -
			    (typeof(fwcode))blob->fw->data),
			    dlen);

			/* Firmware dwords are big-endian on the wire. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* 27xx/28xx: firmware-dump templates follow the code segments. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/*
	 * Dump-template load is best-effort: discard any partial template
	 * but still report success -- the firmware itself loaded above.
	 */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

/*
 * 24xx firmware load: .bin blob first (unless ql2xfwloadbin == 1 forces
 * the 81xx flash-first policy), then fall back to flash.
 */
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	if (ql2xfwloadbin == 1)
		return qla81xx_load_risc(vha, srisc_addr);

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr,
	    vha->hw->flt_region_fw);
}

/*
 * 81xx+ firmware load.  On 27xx/28xx the active (primary/secondary)
 * flash image is chosen first; ql2xfwloadbin == 2 skips flash entirely.
 * As a last resort the "golden" flash image is loaded in limited-
 * operation mode and running_gold_fw is flagged.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}

/*
 * Ask the firmware to stop.  When the stop-firmware mailbox command fails
 * for a retryable reason, re-initialize the chip and retry up to 5 times.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}

/*
 * Bring up a virtual port (NPIV vha): wait for firmware ready on the
 * base port, send a marker, log into the SNS, then request a loop
 * resync on the base port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* Only valid on a virtual port, never the base (vp_idx == 0) port.
	 * NOTE(review): returns -EINVAL while the rest of the function uses
	 * QLA_* status codes -- confirm callers expect this mix. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}

/* 84XX Support **************************************************************/

/* Global list of 84xx chip-state objects, shared across HBAs on one bus. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

/*
 * Find the 84xx chip state shared by HBAs on the same PCI bus, or
 * allocate a new one.  Returns with a kref held (released via
 * qla84xx_put_chip()); NULL on allocation failure.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip. */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	/* kref_init() starts the refcount at 1 for this caller. */
	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

/* kref release callback: unlink from the global list and free. */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

/* Drop this HBA's reference on the shared 84xx chip state. */
void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

/*
 * Verify the 84xx chip under the shared fw_update_mutex; any verify
 * failure or non-zero status word is reported as QLA_FUNCTION_FAILED.
 */
static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ?
	    QLA_FUNCTION_FAILED : QLA_SUCCESS;
}

/* 81XX Support **************************************************************/

/*
 * qla81xx_nvram_config
 *	Read NVRAM (primary or secondary flash region) into cache, validate
 *	its checksum/signature, fall back to safe defaults when invalid, and
 *	populate the ISP81xx-style initialization control block plus the
 *	driver-side copies of the NVRAM-derived parameters.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Return:
 *	0 = NVRAM contents were valid; 1 = defaults were applied.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* 27xx/28xx keep redundant NVRAM/VPD images; pick the active one. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	/* faddr is a 32-bit-word flash address; << 2 converts to bytes. */
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	ql_dbg(ql_dbg_init, vha, 0x0110,
	    "Loading %s nvram image.\n",
	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
	    "primary" : "secondary");
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	/* Valid NVRAM sums to zero over all 32-bit words. */
	dptr = (__force __le32 *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		/* Fabricated WWPN/WWNN; port_no keeps multi-port HBAs unique. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		/* Non-zero rval signals "defaults in use" to the caller. */
		rval = 1;
	}

	/* T10-PI needs the payload size rounded down to a multiple of 8. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= cpu_to_le16(~7);

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		/* NOTE(review): BIT_7 clear appears to mean SCM supported —
		 * confirm against the enhanced_features field definition. */
		if ((nv->enhanced_features & BIT_7) == 0)
			ha->flags.scm_supported_a = 1;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla82xx_restart_isp
 *	Bring an ISP82xx back online after reset: re-init rings, wait for
 *	firmware readiness, re-enable interrupts, re-arm the FCE/EFT trace
 *	buffers, and propagate the abort to every virtual port.
 *
 * Input:
 *	vha = scsi host structure pointer (physical port).
 *
 * Return:
 *	0 on success, non-zero on failure.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the Fibre Channel Event trace buffer if allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the Extended Firmware Trace buffer if allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Abort-ISP every vport; vref_count pins each vp while the
		 * lock is dropped around the potentially sleeping call.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}

/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	-1 (if not found)
 *
 * Context:
 *	 Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return -1;

	priority = -1;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	for (i = 0; i < entries; i++) {
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			/* INVALID_PORT_ID in the entry acts as a wildcard. */
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			/* All-ones WWN in the entry acts as a wildcard. */
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* Both ends must match, either by PID pair or by WWN pair. */
		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}

/*
 * qla24xx_update_fcport_fcp_prio
 *	Activates fcp priority for the logged in fc port
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcp = port structure pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int ret;
	int priority;
	uint16_t mb[5];

	if (fcport->port_type != FCT_TARGET ||
	    fcport->loop_id == FC_NO_LOOP_ID)
		return QLA_FUNCTION_FAILED;

	priority = qla24xx_get_fcp_prio(vha, fcport);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;

	/* P3P (82xx) parts: record the priority locally, no mailbox cmd. */
	if (IS_P3P_TYPE(vha->hw)) {
		fcport->fcp_prio = priority & 0xf;
		return QLA_SUCCESS;
	}

	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
	if (ret == QLA_SUCCESS) {
		if (fcport->fcp_prio != priority)
			ql_dbg(ql_dbg_user, vha, 0x709e,
			    "Updated FCP_CMND priority - value=%d loop_id=%d "
			    "port_id=%02x%02x%02x.\n", priority,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		fcport->fcp_prio = priority & 0xf;
	} else
		ql_dbg(ql_dbg_user, vha, 0x704f,
		    "Unable to update FCP_CMND priority - ret=0x%x for "
		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	return ret;
}

/*
 * qla24xx_update_all_fcp_prio
 *	Activates fcp priority for all the logged in ports
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
{
	int ret;
	fc_port_t *fcport;

	ret = QLA_FUNCTION_FAILED;
	/* We need to set priority for all logged in ports */
	/* NOTE(review): ret reflects only the LAST port's result. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);

	return ret;
}

/*
 * qla2xxx_create_qpair
 *	Allocate and bring up one request/response queue pair for
 *	multi-queue operation: claim a qpair id and an MSI-X vector,
 *	create the response then request queues, and build the SRB mempool.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	qos = quality-of-service value passed to request-queue creation.
 *	vp_idx = virtual port index the qpair belongs to.
 *	startqp = whether to start the queues immediately.
 *
 * Return:
 *	Pointer to the new qpair, or NULL on failure.
 */
struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
	int vp_idx, bool startqp)
{
	int rsp_id = 0;
	int req_id = 0;
	int i;
	struct qla_hw_data *ha = vha->hw;
	uint16_t qpair_id = 0;
	struct qla_qpair *qpair = NULL;
	struct qla_msix_entry *msix;

	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
		ql_log(ql_log_warn, vha, 0x00181,
		    "FW/Driver is not multi-queue capable.\n");
		return NULL;
	}

	if (ql2xmqsupport || ql2xnvmeenable) {
		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate memory for queue pair.\n");
			return NULL;
		}

		qpair->hw = vha->hw;
		qpair->vha = vha;
		qpair->qp_lock_ptr = &qpair->qp_lock;
		spin_lock_init(&qpair->qp_lock);
		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;

		/* Assign available que pair id */
		mutex_lock(&ha->mq_lock);
		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
		if (ha->num_qpairs >= ha->max_qpairs) {
			mutex_unlock(&ha->mq_lock);
			ql_log(ql_log_warn, vha, 0x0183,
			    "No resources to create additional q pair.\n");
			goto fail_qid_map;
		}
		ha->num_qpairs++;
		set_bit(qpair_id, ha->qpair_qid_map);
		ha->queue_pair_map[qpair_id] = qpair;
		qpair->id = qpair_id;
		qpair->vp_idx = vp_idx;
		qpair->fw_started = ha->flags.fw_started;
		INIT_LIST_HEAD(&qpair->hints_list);
		qpair->chip_reset = ha->base_qpair->chip_reset;
		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
		qpair->enable_explicit_conf =
		    ha->base_qpair->enable_explicit_conf;

		/* Claim the first unused MSI-X vector for this qpair. */
		for (i = 0; i < ha->msix_count; i++) {
			msix = &ha->msix_entries[i];
			if (msix->in_use)
				continue;
			qpair->msix = msix;
			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
			    "Vector %x selected for qpair\n", msix->vector);
			break;
		}
		if (!qpair->msix) {
			ql_log(ql_log_warn, vha, 0x0184,
			    "Out of MSI-X vectors!.\n");
			goto fail_msix;
		}

		qpair->msix->in_use = 1;
		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
		qpair->pdev = ha->pdev;
		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;

		mutex_unlock(&ha->mq_lock);

		/* Create response queue first */
		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
		if (!rsp_id) {
			ql_log(ql_log_warn, vha, 0x0185,
			    "Failed to create response queue.\n");
			goto fail_rsp;
		}

		qpair->rsp = ha->rsp_q_map[rsp_id];

		/* Create request queue */
		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
		    startqp);
		if (!req_id) {
			ql_log(ql_log_warn, vha, 0x0186,
			    "Failed to create request queue.\n");
			goto fail_req;
		}

		qpair->req = ha->req_q_map[req_id];
		qpair->rsp->req = qpair->req;
		qpair->rsp->qpair = qpair;
		/* init qpair to this cpu. Will adjust at run time. */
		qla_cpu_update(qpair, raw_smp_processor_id());

		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
			if (ha->fw_attributes & BIT_4)
				qpair->difdix_supported = 1;
		}

		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
		if (!qpair->srb_mempool) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to create srb mempool for qpair %d\n",
			    qpair->id);
			goto fail_mempool;
		}

		/* Mark as online */
		qpair->online = 1;

		if (!vha->flags.qpairs_available)
			vha->flags.qpairs_available = 1;

		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
		ql_dbg(ql_dbg_init, vha, 0x0187,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
	}
	return qpair;

	/* Unwind in reverse order of acquisition; labels fall through. */
fail_mempool:
fail_req:
	qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
	mutex_lock(&ha->mq_lock);
	qpair->msix->in_use = 0;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list))
		vha->flags.qpairs_available = 0;
fail_msix:
	ha->queue_pair_map[qpair_id] = NULL;
	clear_bit(qpair_id, ha->qpair_qid_map);
	ha->num_qpairs--;
	mutex_unlock(&ha->mq_lock);
fail_qid_map:
	kfree(qpair);
	return NULL;
}

/*
 * qla2xxx_delete_qpair
 *	Tear down a queue pair created by qla2xxx_create_qpair: delete its
 *	request and response queues, release its id/bookkeeping under
 *	mq_lock, and free the qpair.
 *
 * Return:
 *	QLA_SUCCESS, or the failing queue-delete status.
 */
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
	int ret = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = qpair->hw;

	qpair->delete_in_progress = 1;

	ret = qla25xx_delete_req_que(vha, qpair->req);
	if (ret != QLA_SUCCESS)
		goto fail;

	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
	if (ret != QLA_SUCCESS)
		goto fail;

	mutex_lock(&ha->mq_lock);
	ha->queue_pair_map[qpair->id] = NULL;
	clear_bit(qpair->id, ha->qpair_qid_map);
	ha->num_qpairs--;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list)) {
		vha->flags.qpairs_available = 0;
		vha->flags.qpairs_req_created = 0;
		vha->flags.qpairs_rsp_created = 0;
	}
	mempool_destroy(qpair->srb_mempool);
	kfree(qpair);
	mutex_unlock(&ha->mq_lock);

	return QLA_SUCCESS;
fail:
	return ret;
}

/*
 * qla2x00_count_set_bits
 *	Return the population count (number of set bits) of @num.
 */
uint64_t
qla2x00_count_set_bits(uint32_t num)
{
	/* Brian Kernighan's Algorithm */
	u64 count = 0;

	while (num) {
		num &= (num - 1);
		count++;
	}
	return count;
}

/*
 * qla2x00_get_num_tgts
 *	Count the fcports on this host that are target ports.
 */
uint64_t
qla2x00_get_num_tgts(scsi_qla_host_t *vha)
{
	fc_port_t *f, *tf;
	u64 count = 0;

	f = NULL;
	tf = NULL;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->port_type != FCT_TARGET)
			continue;
		count++;
	}
	return count;
}

/*
 * qla2xxx_reset_stats
 *	Zero the host error/link statistic counters selected by @flags,
 *	including per-target short-link-down counters when requested.
 *
 * Return:
 *	Always 0.
 */
int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	if (flags & QLA2XX_HW_ERROR)
		vha->hw_err_cnt = 0;
	if (flags & QLA2XX_SHT_LNK_DWN)
		vha->short_link_down_cnt = 0;
	if (flags & QLA2XX_INT_ERR)
		vha->interface_err_cnt = 0;
	if (flags & QLA2XX_CMD_TIMEOUT)
		vha->cmd_timeout_cnt = 0;
	if (flags & QLA2XX_RESET_CMD_ERR)
		vha->reset_cmd_err_cnt = 0;
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->tgt_short_link_down_cnt = 0;
			fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	return 0;
}

/* Start statistics collection: implemented as a reset of the counters. */
int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}

/* Stop statistics collection: implemented as a reset of the counters. */
int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}

/*
 * qla2xxx_get_ini_stats
 *	Fill a ql_vnd_host_stats_resp with the initiator-side counters
 *	selected by @flags; BIT_17 additionally appends one entry per
 *	target port with its short-link-down count.
 *
 * Return:
 *	Always 0; resp->status carries EXT_STATUS_OK.
 */
int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
			  void *data, u64 size)
{
	scsi_qla_host_t *vha = shost_priv(host);
	struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
	struct ql_vnd_stats *rsp_data = &resp->stats;
	u64 ini_entry_count = 0;
	u64 i = 0;
	u64 entry_count = 0;
	u64 num_tgt = 0;
	u32 tmp_stat_type = 0;
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	/* Copy stat type to work on it */
	tmp_stat_type = flags;

	if (tmp_stat_type & BIT_17) {
		num_tgt = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}
	/* One entry per selected initiator counter, plus one per target. */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	entry_count = ini_entry_count + num_tgt;

	rsp_data->entry_count = entry_count;

	i = 0;
	if (flags & QLA2XX_HW_ERROR) {
		rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->hw_err_cnt;
		i++;
	}

	if (flags & QLA2XX_SHT_LNK_DWN) {
		rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->short_link_down_cnt;
		i++;
	}

	if (flags & QLA2XX_INT_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->interface_err_cnt;
		i++;
	}

	if (flags & QLA2XX_CMD_TIMEOUT) {
		rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
		i++;
	}

	if (flags & QLA2XX_RESET_CMD_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
		i++;
	}

	/* i will continue from previous loop, as target
	 * entries are after initiator
	 */
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;
			if (!fcport->rport)
				continue;
			rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
			rsp_data->entry[i].tgt_num = fcport->rport->number;
			rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
			i++;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	resp->status = EXT_STATUS_OK;

	return 0;
}

/*
 * qla2xxx_get_tgt_stats
 *	Report the short-link-down count for a single remote (target) port
 *	into a ql_vnd_tgt_stats_resp.
 *
 * Return:
 *	Always 0.
 */
int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
			  struct fc_rport *rport, void *data, u64 size)
{
	struct ql_vnd_tgt_stats_resp *tgt_data = data;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	tgt_data->status = 0;
	tgt_data->stats.entry_count = 1;
	tgt_data->stats.entry[0].stat_type = flags;
	tgt_data->stats.entry[0].tgt_num = rport->number;
	tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;

	return 0;
}

/*
 * qla2xxx_disable_port
 *	Isolate the port: mark it isolated, then (if the chip is reachable
 *	and up) tear down the ISP state and wait for session deletion.
 *
 * Return:
 *	0 on success, FAILED on PCI/register disconnect.
 */
int qla2xxx_disable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	vha->hw->flags.port_isolated = 1;

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9006,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}
	if (qla2x00_chip_is_down(vha))
		return 0;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);
		qla2x00_wait_for_sess_deletion(vha);
	}

	return 0;
}

/*
 * qla2xxx_enable_port
 *	Undo port isolation and schedule an ISP abort so the DPC thread
 *	brings the port back online.
 *
 * Return:
 *	0 on success, FAILED on PCI/register disconnect.
 */
int qla2xxx_enable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9001,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	vha->hw->flags.port_isolated = 0;
	/* Set the flag to 1, so that isp_abort can proceed */
	vha->flags.online = 1;
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	return 0;
}