// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include "qla_target.h"

/*
*  QLogic ISP2x00 Hardware Support Function Prototypes.
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
    struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * Per-SRB timer callback: dispatch to the iocb-specific ->timeout()
 * handler stored in the SRB.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;

	/* This path expects to run with interrupts enabled; catch misuse. */
	WARN_ON(irqs_disabled());
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
}

/* Stop the SRB's timer and return the SRB to its pool. */
void qla2x00_sp_free(srb_t *sp)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Debug hook: ->done() was invoked on an SRB that was already freed. */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}

/* Debug hook: ->free() was invoked on an SRB that was already freed. */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * Compute the timeout (seconds) used for async IOCBs: derived from the
 * switch-negotiated R_A_TOV on FWI2-capable parts, a fixed value on
 * ISPFx00, and the init-control-block login timeout on older ISPs.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * Timeout handler for an SRB_ABT_CMD IOCB: remove both the abort SRB and
 * the command it was aborting from the outstanding-commands array (under
 * the qpair lock), then complete both with timer-expired status.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp))
			qpair->req->outstanding_cmds[handle] = NULL;

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (sp->cmd_sp)
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);

	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
}

/* Completion callback for an SRB_ABT_CMD IOCB. */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
137 struct srb_iocb *abt = &sp->u.iocb_cmd; 138 srb_t *orig_sp = sp->cmd_sp; 139 140 if (orig_sp) 141 qla_wait_nvme_release_cmd_kref(orig_sp); 142 143 del_timer(&sp->u.iocb_cmd.timer); 144 if (sp->flags & SRB_WAKEUP_ON_COMP) 145 complete(&abt->u.abt.comp); 146 else 147 sp->free(sp); 148 } 149 150 int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) 151 { 152 scsi_qla_host_t *vha = cmd_sp->vha; 153 struct srb_iocb *abt_iocb; 154 srb_t *sp; 155 int rval = QLA_FUNCTION_FAILED; 156 157 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, 158 GFP_ATOMIC); 159 if (!sp) 160 return rval; 161 162 abt_iocb = &sp->u.iocb_cmd; 163 sp->type = SRB_ABT_CMD; 164 sp->name = "abort"; 165 sp->qpair = cmd_sp->qpair; 166 sp->cmd_sp = cmd_sp; 167 if (wait) 168 sp->flags = SRB_WAKEUP_ON_COMP; 169 170 abt_iocb->timeout = qla24xx_abort_iocb_timeout; 171 init_completion(&abt_iocb->u.abt.comp); 172 /* FW can send 2 x ABTS's timeout/20s */ 173 qla2x00_init_timer(sp, 42); 174 175 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; 176 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); 177 178 sp->done = qla24xx_abort_sp_done; 179 180 ql_dbg(ql_dbg_async, vha, 0x507c, 181 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, 182 cmd_sp->type); 183 184 rval = qla2x00_start_sp(sp); 185 if (rval != QLA_SUCCESS) { 186 sp->free(sp); 187 return rval; 188 } 189 190 if (wait) { 191 wait_for_completion(&abt_iocb->u.abt.comp); 192 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
193 QLA_SUCCESS : QLA_FUNCTION_FAILED; 194 sp->free(sp); 195 } 196 197 return rval; 198 } 199 200 void 201 qla2x00_async_iocb_timeout(void *data) 202 { 203 srb_t *sp = data; 204 fc_port_t *fcport = sp->fcport; 205 struct srb_iocb *lio = &sp->u.iocb_cmd; 206 int rc, h; 207 unsigned long flags; 208 209 if (fcport) { 210 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 211 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", 212 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); 213 214 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 215 } else { 216 pr_info("Async-%s timeout - hdl=%x.\n", 217 sp->name, sp->handle); 218 } 219 220 switch (sp->type) { 221 case SRB_LOGIN_CMD: 222 rc = qla24xx_async_abort_cmd(sp, false); 223 if (rc) { 224 /* Retry as needed. */ 225 lio->u.logio.data[0] = MBS_COMMAND_ERROR; 226 lio->u.logio.data[1] = 227 lio->u.logio.flags & SRB_LOGIN_RETRIED ? 228 QLA_LOGIO_LOGIN_RETRIED : 0; 229 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 230 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; 231 h++) { 232 if (sp->qpair->req->outstanding_cmds[h] == 233 sp) { 234 sp->qpair->req->outstanding_cmds[h] = 235 NULL; 236 break; 237 } 238 } 239 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 240 sp->done(sp, QLA_FUNCTION_TIMEOUT); 241 } 242 break; 243 case SRB_LOGOUT_CMD: 244 case SRB_CT_PTHRU_CMD: 245 case SRB_MB_IOCB: 246 case SRB_NACK_PLOGI: 247 case SRB_NACK_PRLI: 248 case SRB_NACK_LOGO: 249 case SRB_CTRL_VP: 250 default: 251 rc = qla24xx_async_abort_cmd(sp, false); 252 if (rc) { 253 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 254 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; 255 h++) { 256 if (sp->qpair->req->outstanding_cmds[h] == 257 sp) { 258 sp->qpair->req->outstanding_cmds[h] = 259 NULL; 260 break; 261 } 262 } 263 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 264 sp->done(sp, QLA_FUNCTION_TIMEOUT); 265 } 266 break; 267 } 268 } 269 270 static void qla2x00_async_login_sp_done(srb_t *sp, int res) 271 { 272 
struct scsi_qla_host *vha = sp->vha; 273 struct srb_iocb *lio = &sp->u.iocb_cmd; 274 struct event_arg ea; 275 276 ql_dbg(ql_dbg_disc, vha, 0x20dd, 277 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); 278 279 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 280 281 if (!test_bit(UNLOADING, &vha->dpc_flags)) { 282 memset(&ea, 0, sizeof(ea)); 283 ea.fcport = sp->fcport; 284 ea.data[0] = lio->u.logio.data[0]; 285 ea.data[1] = lio->u.logio.data[1]; 286 ea.iop[0] = lio->u.logio.iop[0]; 287 ea.iop[1] = lio->u.logio.iop[1]; 288 ea.sp = sp; 289 qla24xx_handle_plogi_done_event(vha, &ea); 290 } 291 292 sp->free(sp); 293 } 294 295 static inline bool 296 fcport_is_smaller(fc_port_t *fcport) 297 { 298 if (wwn_to_u64(fcport->port_name) < 299 wwn_to_u64(fcport->vha->port_name)) 300 return true; 301 else 302 return false; 303 } 304 305 static inline bool 306 fcport_is_bigger(fc_port_t *fcport) 307 { 308 return !fcport_is_smaller(fcport); 309 } 310 311 int 312 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, 313 uint16_t *data) 314 { 315 srb_t *sp; 316 struct srb_iocb *lio; 317 int rval = QLA_FUNCTION_FAILED; 318 319 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || 320 fcport->loop_id == FC_NO_LOOP_ID) { 321 ql_log(ql_log_warn, vha, 0xffff, 322 "%s: %8phC - not sending command.\n", 323 __func__, fcport->port_name); 324 return rval; 325 } 326 327 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 328 if (!sp) 329 goto done; 330 331 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); 332 fcport->flags |= FCF_ASYNC_SENT; 333 fcport->logout_completed = 0; 334 335 sp->type = SRB_LOGIN_CMD; 336 sp->name = "login"; 337 sp->gen1 = fcport->rscn_gen; 338 sp->gen2 = fcport->login_gen; 339 340 lio = &sp->u.iocb_cmd; 341 lio->timeout = qla2x00_async_iocb_timeout; 342 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 343 344 sp->done = qla2x00_async_login_sp_done; 345 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { 346 
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    vha->e_dbell.db_flags & EDB_ACTIVE) {
			lio->u.logio.flags |=
				(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
			ql_dbg(ql_dbg_disc, vha, 0x2072,
			    "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
			    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
		} else {
			lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
		}
	}

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/* Completion callback for an async LOGO IOCB. */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
	sp->free(sp);
}

/*
 * qla2x00_async_logout - issue an asynchronous LOGO for @fcport.
 *
 * Returns qla2x00 local function return status code.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name, fcport->explicit_logout);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}

/*
 * Post-PRLO bookkeeping, run from DPC work: mark the device lost (unless
 * it is a target-mode session) and notify the target-mode logo handler.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}

/* Completion callback for an async PRLO IOCB; defers handling to DPC. */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

/*
 * qla2x00_async_prlo - issue an asynchronous PRLO for @fcport.
 *
 * Returns qla2x00 local function return status code.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prlo_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE; 491 return rval; 492 } 493 494 static 495 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) 496 { 497 struct fc_port *fcport = ea->fcport; 498 499 ql_dbg(ql_dbg_disc, vha, 0x20d2, 500 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", 501 __func__, fcport->port_name, fcport->disc_state, 502 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, 503 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); 504 505 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", 506 ea->data[0]); 507 508 if (ea->data[0] != MBS_COMMAND_COMPLETE) { 509 ql_dbg(ql_dbg_disc, vha, 0x2066, 510 "%s %8phC: adisc fail: post delete\n", 511 __func__, ea->fcport->port_name); 512 /* deleted = 0 & logout_on_delete = force fw cleanup */ 513 fcport->deleted = 0; 514 fcport->logout_on_delete = 1; 515 qlt_schedule_sess_for_deletion(ea->fcport); 516 return; 517 } 518 519 if (ea->fcport->disc_state == DSC_DELETE_PEND) 520 return; 521 522 if (ea->sp->gen2 != ea->fcport->login_gen) { 523 /* target side must have changed it. 
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived while the ADISC was in flight; replay it. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

/* Queue a QLA_EVT_ELS_PLOGI work item for @fcport on the DPC thread. */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, e);
}

/* Completion callback for an async ADISC IOCB. */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla24xx_handle_adisc_event(vha, &ea);

	sp->free(sp);
}

/*
 * qla2x00_async_adisc - issue an asynchronous ADISC for @fcport.
 * @data: login status words; QLA_LOGIO_LOGIN_RETRIED in data[1] requests
 *        SRB_LOGIN_RETRIED handling.
 *
 * Returns qla2x00 local function return status code.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

/* Loop IDs reserved by the firmware/driver; never assigned to fcports. */
static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return loop_id > NPH_LAST_HANDLE;

	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}

/**
 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
 * @vha: adapter state pointer.
 * @dev: port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* loop_id_map is shared across vports; guard it with vport_slock. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else {
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return rval;
}

/* Release @fcport's loop ID back into the shared loop_id_map. */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

/*
 * Process one fcport against the firmware's extended Get Name List
 * response: resolve loop-ID/port-ID conflicts and advance the login
 * state machine according to the current topology.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* An RSCN arrived after GNL was issued; replay it. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* Upper nibble carries the NVMe login state, lower the FCP one. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/* Port ID or loop ID moved: tear down the old session. */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			     fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
			id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* BIT_4 of PRLI service word 3 = target function. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled) {
					/* check to see if App support Secure */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				fallthrough;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
						DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha,
fcport); 851 break; 852 } 853 break; 854 case ISP_CFG_N: 855 fcport->fw_login_state = current_login_state; 856 fcport->d_id = id; 857 switch (current_login_state) { 858 case DSC_LS_PRLI_PEND: 859 /* 860 * In the middle of PRLI. Let it finish. 861 * Allow relogin code to recheck state again 862 * with GNL. Push disc_state back to DELETED 863 * so GNL can go out again 864 */ 865 qla2x00_set_fcport_disc_state(fcport, 866 DSC_DELETED); 867 break; 868 case DSC_LS_PRLI_COMP: 869 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 870 fcport->port_type = FCT_INITIATOR; 871 else 872 fcport->port_type = FCT_TARGET; 873 874 data[0] = data[1] = 0; 875 qla2x00_post_async_adisc_work(vha, fcport, 876 data); 877 break; 878 case DSC_LS_PLOGI_COMP: 879 if (fcport_is_bigger(fcport)) { 880 /* local adapter is smaller */ 881 if (fcport->loop_id != FC_NO_LOOP_ID) 882 qla2x00_clear_loop_id(fcport); 883 884 fcport->loop_id = loop_id; 885 qla24xx_fcport_handle_login(vha, 886 fcport); 887 break; 888 } 889 fallthrough; 890 default: 891 if (fcport_is_smaller(fcport)) { 892 /* local adapter is bigger */ 893 if (fcport->loop_id != FC_NO_LOOP_ID) 894 qla2x00_clear_loop_id(fcport); 895 896 fcport->loop_id = loop_id; 897 qla24xx_fcport_handle_login(vha, 898 fcport); 899 } 900 break; 901 } 902 break; 903 } /* switch (ha->current_topology) */ 904 } 905 906 if (!found) { 907 switch (vha->hw->current_topology) { 908 case ISP_CFG_F: 909 case ISP_CFG_FL: 910 for (i = 0; i < n; i++) { 911 e = &vha->gnl.l[i]; 912 id.b.domain = e->port_id[0]; 913 id.b.area = e->port_id[1]; 914 id.b.al_pa = e->port_id[2]; 915 id.b.rsvd_1 = 0; 916 loop_id = le16_to_cpu(e->nport_handle); 917 918 if (fcport->d_id.b24 == id.b24) { 919 conflict_fcport = 920 qla2x00_find_fcport_by_wwpn(vha, 921 e->port_name, 0); 922 if (conflict_fcport) { 923 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 924 vha, 0x20e5, 925 "%s %d %8phC post del sess\n", 926 __func__, __LINE__, 927 conflict_fcport->port_name); 928 qlt_schedule_sess_for_deletion 929 
(conflict_fcport); 930 } 931 } 932 /* 933 * FW already picked this loop id for 934 * another fcport 935 */ 936 if (fcport->loop_id == loop_id) 937 fcport->loop_id = FC_NO_LOOP_ID; 938 } 939 qla24xx_fcport_handle_login(vha, fcport); 940 break; 941 case ISP_CFG_N: 942 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); 943 if (time_after_eq(jiffies, fcport->dm_login_expire)) { 944 if (fcport->n2n_link_reset_cnt < 2) { 945 fcport->n2n_link_reset_cnt++; 946 /* 947 * remote port is not sending PLOGI. 948 * Reset link to kick start his state 949 * machine 950 */ 951 set_bit(N2N_LINK_RESET, 952 &vha->dpc_flags); 953 } else { 954 if (fcport->n2n_chip_reset < 1) { 955 ql_log(ql_log_info, vha, 0x705d, 956 "Chip reset to bring laser down"); 957 set_bit(ISP_ABORT_NEEDED, 958 &vha->dpc_flags); 959 fcport->n2n_chip_reset++; 960 } else { 961 ql_log(ql_log_info, vha, 0x705d, 962 "Remote port %8ph is not coming back\n", 963 fcport->port_name); 964 fcport->scan_state = 0; 965 } 966 } 967 qla2xxx_wake_dpc(vha); 968 } else { 969 /* 970 * report port suppose to do PLOGI. Give him 971 * more time. FW will catch it. 
972 */ 973 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 974 } 975 break; 976 default: 977 break; 978 } 979 } 980 } /* gnl_event */ 981 982 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) 983 { 984 struct scsi_qla_host *vha = sp->vha; 985 unsigned long flags; 986 struct fc_port *fcport = NULL, *tf; 987 u16 i, n = 0, loop_id; 988 struct event_arg ea; 989 struct get_name_list_extended *e; 990 u64 wwn; 991 struct list_head h; 992 bool found = false; 993 994 ql_dbg(ql_dbg_disc, vha, 0x20e7, 995 "Async done-%s res %x mb[1]=%x mb[2]=%x \n", 996 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], 997 sp->u.iocb_cmd.u.mbx.in_mb[2]); 998 999 if (res == QLA_FUNCTION_TIMEOUT) 1000 return; 1001 1002 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 1003 memset(&ea, 0, sizeof(ea)); 1004 ea.sp = sp; 1005 ea.rc = res; 1006 1007 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= 1008 sizeof(struct get_name_list_extended)) { 1009 n = sp->u.iocb_cmd.u.mbx.in_mb[1] / 1010 sizeof(struct get_name_list_extended); 1011 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ 1012 } 1013 1014 for (i = 0; i < n; i++) { 1015 e = &vha->gnl.l[i]; 1016 loop_id = le16_to_cpu(e->nport_handle); 1017 /* mask out reserve bit */ 1018 loop_id = (loop_id & 0x7fff); 1019 set_bit(loop_id, vha->hw->loop_id_map); 1020 wwn = wwn_to_u64(e->port_name); 1021 1022 ql_dbg(ql_dbg_disc, vha, 0x20e8, 1023 "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", 1024 __func__, &wwn, e->port_id[2], e->port_id[1], 1025 e->port_id[0], e->current_login_state, e->last_login_state, 1026 (loop_id & 0x7fff)); 1027 } 1028 1029 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1030 1031 INIT_LIST_HEAD(&h); 1032 fcport = tf = NULL; 1033 if (!list_empty(&vha->gnl.fcports)) 1034 list_splice_init(&vha->gnl.fcports, &h); 1035 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1036 1037 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { 1038 list_del_init(&fcport->gnl_entry); 1039 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 
1040 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 1041 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1042 ea.fcport = fcport; 1043 1044 qla24xx_handle_gnl_done_event(vha, &ea); 1045 } 1046 1047 /* create new fcport if fw has knowledge of new sessions */ 1048 for (i = 0; i < n; i++) { 1049 port_id_t id; 1050 u64 wwnn; 1051 1052 e = &vha->gnl.l[i]; 1053 wwn = wwn_to_u64(e->port_name); 1054 1055 found = false; 1056 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { 1057 if (!memcmp((u8 *)&wwn, fcport->port_name, 1058 WWN_SIZE)) { 1059 found = true; 1060 break; 1061 } 1062 } 1063 1064 id.b.domain = e->port_id[2]; 1065 id.b.area = e->port_id[1]; 1066 id.b.al_pa = e->port_id[0]; 1067 id.b.rsvd_1 = 0; 1068 1069 if (!found && wwn && !IS_SW_RESV_ADDR(id)) { 1070 ql_dbg(ql_dbg_disc, vha, 0x2065, 1071 "%s %d %8phC %06x post new sess\n", 1072 __func__, __LINE__, (u8 *)&wwn, id.b24); 1073 wwnn = wwn_to_u64(e->node_name); 1074 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn, 1075 (u8 *)&wwnn, NULL, 0); 1076 } 1077 } 1078 1079 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1080 vha->gnl.sent = 0; 1081 if (!list_empty(&vha->gnl.fcports)) { 1082 /* retrigger gnl */ 1083 list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, 1084 gnl_entry) { 1085 list_del_init(&fcport->gnl_entry); 1086 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 1087 if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) 1088 break; 1089 } 1090 } 1091 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1092 1093 sp->free(sp); 1094 } 1095 1096 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) 1097 { 1098 srb_t *sp; 1099 struct srb_iocb *mbx; 1100 int rval = QLA_FUNCTION_FAILED; 1101 unsigned long flags; 1102 u16 *mb; 1103 1104 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 1105 return rval; 1106 1107 ql_dbg(ql_dbg_disc, vha, 0x20d9, 1108 "Async-gnlist WWPN %8phC \n", fcport->port_name); 1109 1110 
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1111 fcport->flags |= FCF_ASYNC_SENT; 1112 qla2x00_set_fcport_disc_state(fcport, DSC_GNL); 1113 fcport->last_rscn_gen = fcport->rscn_gen; 1114 fcport->last_login_gen = fcport->login_gen; 1115 1116 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); 1117 if (vha->gnl.sent) { 1118 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1119 return QLA_SUCCESS; 1120 } 1121 vha->gnl.sent = 1; 1122 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1123 1124 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1125 if (!sp) 1126 goto done; 1127 1128 sp->type = SRB_MB_IOCB; 1129 sp->name = "gnlist"; 1130 sp->gen1 = fcport->rscn_gen; 1131 sp->gen2 = fcport->login_gen; 1132 1133 mbx = &sp->u.iocb_cmd; 1134 mbx->timeout = qla2x00_async_iocb_timeout; 1135 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); 1136 1137 mb = sp->u.iocb_cmd.u.mbx.out_mb; 1138 mb[0] = MBC_PORT_NODE_NAME_LIST; 1139 mb[1] = BIT_2 | BIT_3; 1140 mb[2] = MSW(vha->gnl.ldma); 1141 mb[3] = LSW(vha->gnl.ldma); 1142 mb[6] = MSW(MSD(vha->gnl.ldma)); 1143 mb[7] = LSW(MSD(vha->gnl.ldma)); 1144 mb[8] = vha->gnl.size; 1145 mb[9] = vha->vp_idx; 1146 1147 sp->done = qla24xx_async_gnl_sp_done; 1148 1149 ql_dbg(ql_dbg_disc, vha, 0x20da, 1150 "Async-%s - OUT WWPN %8phC hndl %x\n", 1151 sp->name, fcport->port_name, sp->handle); 1152 1153 rval = qla2x00_start_sp(sp); 1154 if (rval != QLA_SUCCESS) 1155 goto done_free_sp; 1156 1157 return rval; 1158 1159 done_free_sp: 1160 sp->free(sp); 1161 done: 1162 fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT); 1163 return rval; 1164 } 1165 1166 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) 1167 { 1168 struct qla_work_evt *e; 1169 1170 e = qla2x00_alloc_work(vha, QLA_EVT_GNL); 1171 if (!e) 1172 return QLA_FUNCTION_FAILED; 1173 1174 e->u.fcport.fcport = fcport; 1175 fcport->flags |= FCF_ASYNC_ACTIVE; 1176 return qla2x00_post_work(vha, e); 1177 } 1178 1179 static void 
/* Completion callback for the async Get Port Database mailbox IOCB:
 * hand the result to the GPDB event handler (unless the command timed
 * out), then free the DMA'd port-database buffer and the srb. */
qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	/* On timeout the port database contents are not trustworthy;
	 * skip event processing and just release resources. */
	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/*
 * qla24xx_post_prli_work - queue a QLA_EVT_PRLI work item for @fcport.
 * Refused in pure target mode (the initiator side drives PRLI).
 */
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/* Completion callback for the async PRLI IOCB: forward mailbox status
 * and IO params to the PRLI-done event handler, unless the driver is
 * unloading.  Always frees the srb. */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla24xx_async_prli - issue an async PRLI (process login) IOCB to
 * @fcport.  Uses the NVMe PRLI variant when the port is an NVMe target.
 * Returns QLA_SUCCESS on successful submission.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* In dual mode, back off while firmware already has a PLOGI/PRLI
	 * in progress for this port (likely remote-initiated). */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ?
	    "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Submission failed: ask DPC to retry the login later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla24xx_post_gpdb_work - queue a QLA_EVT_GPDB work item so the Get
 * Port Database command is issued from DPC context.  @opt is passed
 * through to mailbox register 10.
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb - issue an async MBC_GET_PORT_DATABASE mailbox
 * IOCB for @fcport.  A port_database_24xx buffer is allocated from the
 * DMA pool and freed by the completion handler (or on the error path
 * here).  Returns QLA_SUCCESS on successful submission.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* Mailbox registers: loop id and DMA address of the buffer the
	 * firmware fills with the port database. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx->u.mbx.in = pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* NOTE(review): failure path re-posts the GPDB as deferred work
	 * (retry from DPC context) — presumably intentional; confirm
	 * against the driver's work-event handler. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

/* Common tail of GPDB handling: bump login_gen, mark the session live,
 * and either register the port with the upper layers (first successful
 * login) or mark an existing session re-validated.  Runs mostly under
 * tgt.sess_lock; the lock is dropped around qla24xx_sched_upd_fcport(). */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

/*
 * qla_chk_secure_login - record whether the firmware reported a secure
 * (EDIF) login in @pd and, when EDIF is enabled, start the follow-up
 * (authentication doorbell for secure ports, PRLI for non-secure ones).
 *
 * Returns 1 when follow-up processing was scheduled here (caller should
 * stop), 0 when the caller should continue its normal flow.
 */
static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rc = 0;

	if (pd->secure_login) {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "Secure Login established on %8phC\n",
		    fcport->port_name);
		fcport->edif.secured_login = 1;
		fcport->edif.non_secured_login = 0;
		fcport->flags |= FCF_FCSP_DEVICE;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "non-Secure Login %8phC",
		    fcport->port_name);
		fcport->edif.secured_login = 0;
		fcport->edif.non_secured_login = 1;
	}
	if (vha->hw->flags.edif_enabled) {
		if (fcport->flags & FCF_FCSP_DEVICE) {
			qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
			/* Start edif prli timer & ring doorbell for app */
			fcport->edif.rx_sa_set = 0;
			fcport->edif.tx_sa_set = 0;
			fcport->edif.rx_sa_pending = 0;
			fcport->edif.tx_sa_pending = 0;

			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    fcport->d_id.b24);

			if (vha->e_dbell.db_flags == EDB_ACTIVE) {
				ql_dbg(ql_dbg_disc, vha, 0x20ef,
				    "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->edif.app_started = 1;
				fcport->edif.app_sess_online = 1;

				qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
				    fcport->d_id.b24, 0, fcport);
			}

			rc = 1;
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_prli_work(vha, fcport);
			rc = 1;
		}
	}
	return rc;
}

/*
 * qla24xx_handle_gpdb_event - act on a completed Get Port Database.
 * Decodes the firmware login state from the returned port database
 * (upper nibble for NVMe targets, lower for FCP), drops stale results
 * via the gen1/gen2 generation checks, then dispatches: parse the DB on
 * PRLI-complete, relogin/PRLI on intermediate states, delete the
 * session on LOGO/unavailable.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived while the command was in flight:
		 * replay it and tear the session down. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_COMPLETE:
		if (qla_chk_secure_login(vha, fcport, pd))
			return;
		fallthrough;
	case PDS_PLOGI_PENDING:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

/* Decide whether this adapter should initiate login to @fcport (N2N
 * WWN comparison in dual mode; always in initiator mode) and, if so,
 * allocate a loop id when needed and post the async login work. */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			/* N2N tie-break: the adapter with the larger WWPN
			 * initiates the login. */
			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
			    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login && fcport->login_retry) {
		fcport->login_retry--;
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

/*
 * qla24xx_fcport_handle_login - discovery state machine step for one
 * fcport.  Based on disc_state (and topology), posts the next async
 * action: GNL, GNNID, PLOGI/ELS PLOGI, PRLI, GPDB or ADISC — or defers
 * by setting RELOGIN_NEEDED.  Always returns 0.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Dual mode with a firmware-side PLOGI/PRLI already pending for
	 * a port that has a loop id: let that finish first. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* NOTE(review): 0x6 here matches the
			 * PRLI-complete login state nibble — confirm
			 * against the PDS_* definitions. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* Waiting on a conflicting session to
				 * clean up; retry via DPC later. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Registration with upper layers still in progress; log
		 * once a minute if it is taking unusually long. */
		sec = jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}

/*
 * qla24xx_post_newsess_work - queue a QLA_EVT_NEW_SESS work item that
 * creates an fcport for a newly discovered N_Port id/WWPN pair.
 * @node_name and @pla may be NULL.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;

	e =
	    qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_handle_rscn - process an RSCN for the port id in @ea: mark a
 * matching fcport for rescan (bumping rscn_gen), skip FCP2 devices,
 * and schedule the delayed fabric scan if one is not already queued.
 */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport;
	unsigned long flags;

	fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
	if (fcport) {
		if (fcport->flags & FCF_FCP2_DEVICE) {
			/* FCP2 (tape) devices: do not tear the session
			 * down on an RSCN. */
			ql_dbg(ql_dbg_disc, vha, 0x2115,
			    "Delaying session delete for FCP2 portid=%06x %8phC ",
			    fcport->d_id.b24, fcport->port_name);
			return;
		}
		fcport->scan_needed = 1;
		fcport->rscn_gen++;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/*
 * qla24xx_handle_relogin_event - re-drive discovery for @ea->fcport:
 * refresh via GNL if an RSCN arrived since the last pass, otherwise
 * run the login state machine.  No-op while unloading.
 */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

/* ELS PLOGI completed: follow up with a PRLI unless in pure target
 * mode (where the initiator drives PRLI). */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
			       struct event_arg *ea)
{
	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}

/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
		qla2x00_handle_rscn(fcport->vha, &ea);
	}
}

/* Timeout handler for the TMF IOCB: try to abort the command; if the
 * abort could not even be issued, pull the srb out of the outstanding
 * array ourselves and complete the TMF with CS_TIMEOUT so the waiter
 * in qla2x00_async_tm_cmd() is released. */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	rc = qla24xx_async_abort_cmd(sp, false);
	if (rc) {
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}

/* TMF IOCB completion: wake the synchronous waiter. */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

/*
 * qla2x00_async_tm_cmd - issue a task management function (e.g. LUN
 * reset) to @fcport and wait synchronously for its completion, then
 * issue a marker IOCB.  Returns the TMF completion status.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Block until the completion/timeout handler fires. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/*
 * qla24xx_async_abort_command - abort the command tracked by @sp.
 * Locates the srb in the queue pair's outstanding-commands array; if
 * absent, the command already completed and QLA_FUNCTION_FAILED is
 * returned.  FX00 disc commands use the qlafx00 abort path.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}

/*
 * qla24xx_handle_prli_done_event - act on PRLI completion status in
 * @ea->data[0].  On success, record NVMe PRLI service parameters and
 * fetch the port database; on failure, either retry (ELS reject
 * "busy"), flip the FC4 type and reset the N2N link, or tear the
 * session down so relogin can retrigger.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			/* First-burst size is carried in 512-byte units
			 * in the low word of IO param 1. */
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			/* Remote is busy: just schedule a relogin. */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		    "FCP" : "NVMe", ea->fcport->fc4_type);

		if (N2N_TOPO(vha->hw)) {
			/* PRLI failed in N2N: switch to the other FC4
			 * protocol type before retrying. */
			if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
			} else {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
				ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
			}

			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			if (NVME_FCP_TARGET(ea->fcport)) {
				/* Dual-protocol target: drop the
				 * higher-priority FC4 type that just
				 * failed so relogin tries the other. */
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s prli\n",
				    __func__, __LINE__,
				    ea->fcport->port_name,
				    (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
				    ? "NVMe" : "FCP");
				if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
					ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				else
					ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			}

			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}

/*
 * qla24xx_handle_plogi_done_event - act on PLOGI completion status in
 * @ea->data[0].  Validates the port's discovery/login state and the
 * generation counters first, then handles completion (proceed to
 * PRLI/GPDB), command error (mark login failed), loop-id conflict
 * (re-GNL with a fresh loop id) and port-id conflict (invalidate or
 * delete the conflicting session).
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived while the PLOGI was in flight; replay it
		 * and rebuild the session. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (vha->hw->flags.edif_enabled) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		} else {
			if (NVME_TARGET(vha->hw, fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2117,
				    "%s %d %8phC post prli\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ea,
				    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->loop_id, fcport->d_id.b24);

				set_bit(fcport->loop_id, vha->hw->loop_id_map);
				spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
				fcport->chip_reset = vha->hw->base_qpair->chip_reset;
				fcport->logout_on_delete = 1;
				fcport->send_els_logo = 0;
				fcport->fw_login_state = DSC_LS_PRLI_COMP;
				spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

				qla24xx_post_gpdb_work(vha, fcport, 0);
			}
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area = (ea->iop[1] >> 8) & 0xff;
		cid.b.al_pa = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Reserve the colliding loop id and go get a new one. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* Adopt the firmware-reported loop id, then tear
			 * the session down so login can restart clean. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * qla83xx_nic_core_fw_load - participate in the ISP83xx Inter-Driver
 * Communication (IDC) protocol at load time: announce driver presence,
 * decide reset ownership, negotiate IDC major/minor versions and run
 * the IDC state handler.  Executes under the IDC lock.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
2267 * Others: Check compatibility with current IDC Major version. 2268 */ 2269 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver); 2270 if (ha->flags.nic_core_reset_owner) { 2271 /* Set IDC Major version */ 2272 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION; 2273 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver); 2274 2275 /* Clearing IDC-Lock-Recovery register */ 2276 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0); 2277 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) { 2278 /* 2279 * Clear further IDC participation if we are not compatible with 2280 * the current IDC Major Version. 2281 */ 2282 ql_log(ql_log_warn, vha, 0xb07d, 2283 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", 2284 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION); 2285 __qla83xx_clear_drv_presence(vha); 2286 rval = QLA_FUNCTION_FAILED; 2287 goto exit; 2288 } 2289 /* Each function sets its supported Minor version. */ 2290 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver); 2291 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); 2292 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver); 2293 2294 if (ha->flags.nic_core_reset_owner) { 2295 memset(config, 0, sizeof(config)); 2296 if (!qla81xx_get_port_config(vha, config)) 2297 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 2298 QLA8XXX_DEV_READY); 2299 } 2300 2301 rval = qla83xx_idc_state_handler(vha); 2302 2303 exit: 2304 qla83xx_idc_unlock(vha, 0); 2305 2306 return rval; 2307 } 2308 2309 /* 2310 * qla2x00_initialize_adapter 2311 * Initialize board. 2312 * 2313 * Input: 2314 * ha = adapter block pointer. 
2315 * 2316 * Returns: 2317 * 0 = success 2318 */ 2319 int 2320 qla2x00_initialize_adapter(scsi_qla_host_t *vha) 2321 { 2322 int rval; 2323 struct qla_hw_data *ha = vha->hw; 2324 struct req_que *req = ha->req_q_map[0]; 2325 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2326 2327 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); 2328 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); 2329 2330 /* Clear adapter flags. */ 2331 vha->flags.online = 0; 2332 ha->flags.chip_reset_done = 0; 2333 vha->flags.reset_active = 0; 2334 ha->flags.pci_channel_io_perm_failure = 0; 2335 ha->flags.eeh_busy = 0; 2336 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); 2337 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 2338 atomic_set(&vha->loop_state, LOOP_DOWN); 2339 vha->device_flags = DFLG_NO_CABLE; 2340 vha->dpc_flags = 0; 2341 vha->flags.management_server_logged_in = 0; 2342 vha->marker_needed = 0; 2343 ha->isp_abort_cnt = 0; 2344 ha->beacon_blink_led = 0; 2345 2346 set_bit(0, ha->req_qid_map); 2347 set_bit(0, ha->rsp_qid_map); 2348 2349 ql_dbg(ql_dbg_init, vha, 0x0040, 2350 "Configuring PCI space...\n"); 2351 rval = ha->isp_ops->pci_config(vha); 2352 if (rval) { 2353 ql_log(ql_log_warn, vha, 0x0044, 2354 "Unable to configure PCI space.\n"); 2355 return (rval); 2356 } 2357 2358 ha->isp_ops->reset_chip(vha); 2359 2360 /* Check for secure flash support */ 2361 if (IS_QLA28XX(ha)) { 2362 if (rd_reg_word(®->mailbox12) & BIT_0) 2363 ha->flags.secure_adapter = 1; 2364 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", 2365 (ha->flags.secure_adapter) ? "Yes" : "No"); 2366 } 2367 2368 2369 rval = qla2xxx_get_flash_info(vha); 2370 if (rval) { 2371 ql_log(ql_log_fatal, vha, 0x004f, 2372 "Unable to validate FLASH data.\n"); 2373 return rval; 2374 } 2375 2376 if (IS_QLA8044(ha)) { 2377 qla8044_read_reset_template(vha); 2378 2379 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
2380 * If DONRESET_BIT0 is set, drivers should not set dev_state 2381 * to NEED_RESET. But if NEED_RESET is set, drivers should 2382 * should honor the reset. */ 2383 if (ql2xdontresethba == 1) 2384 qla8044_set_idc_dontreset(vha); 2385 } 2386 2387 ha->isp_ops->get_flash_version(vha, req->ring); 2388 ql_dbg(ql_dbg_init, vha, 0x0061, 2389 "Configure NVRAM parameters...\n"); 2390 2391 /* Let priority default to FCP, can be overridden by nvram_config */ 2392 ha->fc4_type_priority = FC4_PRIORITY_FCP; 2393 2394 ha->isp_ops->nvram_config(vha); 2395 2396 if (ha->fc4_type_priority != FC4_PRIORITY_FCP && 2397 ha->fc4_type_priority != FC4_PRIORITY_NVME) 2398 ha->fc4_type_priority = FC4_PRIORITY_FCP; 2399 2400 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", 2401 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); 2402 2403 if (ha->flags.disable_serdes) { 2404 /* Mask HBA via NVRAM settings? */ 2405 ql_log(ql_log_info, vha, 0x0077, 2406 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); 2407 return QLA_FUNCTION_FAILED; 2408 } 2409 2410 ql_dbg(ql_dbg_init, vha, 0x0078, 2411 "Verifying loaded RISC code...\n"); 2412 2413 /* If smartsan enabled then require fdmi and rdp enabled */ 2414 if (ql2xsmartsan) { 2415 ql2xfdmienable = 1; 2416 ql2xrdpenable = 1; 2417 } 2418 2419 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 2420 rval = ha->isp_ops->chip_diag(vha); 2421 if (rval) 2422 return (rval); 2423 rval = qla2x00_setup_chip(vha); 2424 if (rval) 2425 return (rval); 2426 } 2427 2428 if (IS_QLA84XX(ha)) { 2429 ha->cs84xx = qla84xx_get_chip(vha); 2430 if (!ha->cs84xx) { 2431 ql_log(ql_log_warn, vha, 0x00d0, 2432 "Unable to configure ISP84XX.\n"); 2433 return QLA_FUNCTION_FAILED; 2434 } 2435 } 2436 2437 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 2438 rval = qla2x00_init_rings(vha); 2439 2440 /* No point in continuing if firmware initialization failed. 
*/ 2441 if (rval != QLA_SUCCESS) 2442 return rval; 2443 2444 ha->flags.chip_reset_done = 1; 2445 2446 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 2447 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 2448 rval = qla84xx_init_chip(vha); 2449 if (rval != QLA_SUCCESS) { 2450 ql_log(ql_log_warn, vha, 0x00d4, 2451 "Unable to initialize ISP84XX.\n"); 2452 qla84xx_put_chip(vha); 2453 } 2454 } 2455 2456 /* Load the NIC Core f/w if we are the first protocol driver. */ 2457 if (IS_QLA8031(ha)) { 2458 rval = qla83xx_nic_core_fw_load(vha); 2459 if (rval) 2460 ql_log(ql_log_warn, vha, 0x0124, 2461 "Error in initializing NIC Core f/w.\n"); 2462 } 2463 2464 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 2465 qla24xx_read_fcp_prio_cfg(vha); 2466 2467 if (IS_P3P_TYPE(ha)) 2468 qla82xx_set_driver_version(vha, QLA2XXX_VERSION); 2469 else 2470 qla25xx_set_driver_version(vha, QLA2XXX_VERSION); 2471 2472 return (rval); 2473 } 2474 2475 /** 2476 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers. 2477 * @vha: HA context 2478 * 2479 * Returns 0 on success. 2480 */ 2481 int 2482 qla2100_pci_config(scsi_qla_host_t *vha) 2483 { 2484 uint16_t w; 2485 unsigned long flags; 2486 struct qla_hw_data *ha = vha->hw; 2487 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2488 2489 pci_set_master(ha->pdev); 2490 pci_try_set_mwi(ha->pdev); 2491 2492 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2493 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2494 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2495 2496 pci_disable_rom(ha->pdev); 2497 2498 /* Get PCI bus information. */ 2499 spin_lock_irqsave(&ha->hardware_lock, flags); 2500 ha->pci_attr = rd_reg_word(®->ctrl_status); 2501 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2502 2503 return QLA_SUCCESS; 2504 } 2505 2506 /** 2507 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers. 2508 * @vha: HA context 2509 * 2510 * Returns 0 on success. 
2511 */ 2512 int 2513 qla2300_pci_config(scsi_qla_host_t *vha) 2514 { 2515 uint16_t w; 2516 unsigned long flags = 0; 2517 uint32_t cnt; 2518 struct qla_hw_data *ha = vha->hw; 2519 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2520 2521 pci_set_master(ha->pdev); 2522 pci_try_set_mwi(ha->pdev); 2523 2524 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2525 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2526 2527 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2528 w &= ~PCI_COMMAND_INTX_DISABLE; 2529 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2530 2531 /* 2532 * If this is a 2300 card and not 2312, reset the 2533 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately, 2534 * the 2310 also reports itself as a 2300 so we need to get the 2535 * fb revision level -- a 6 indicates it really is a 2300 and 2536 * not a 2310. 2537 */ 2538 if (IS_QLA2300(ha)) { 2539 spin_lock_irqsave(&ha->hardware_lock, flags); 2540 2541 /* Pause RISC. */ 2542 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); 2543 for (cnt = 0; cnt < 30000; cnt++) { 2544 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) 2545 break; 2546 2547 udelay(10); 2548 } 2549 2550 /* Select FPM registers. */ 2551 wrt_reg_word(®->ctrl_status, 0x20); 2552 rd_reg_word(®->ctrl_status); 2553 2554 /* Get the fb rev level */ 2555 ha->fb_rev = RD_FB_CMD_REG(ha, reg); 2556 2557 if (ha->fb_rev == FPM_2300) 2558 pci_clear_mwi(ha->pdev); 2559 2560 /* Deselect FPM registers. */ 2561 wrt_reg_word(®->ctrl_status, 0x0); 2562 rd_reg_word(®->ctrl_status); 2563 2564 /* Release RISC module. */ 2565 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2566 for (cnt = 0; cnt < 30000; cnt++) { 2567 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0) 2568 break; 2569 2570 udelay(10); 2571 } 2572 2573 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2574 } 2575 2576 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 2577 2578 pci_disable_rom(ha->pdev); 2579 2580 /* Get PCI bus information. 
*/ 2581 spin_lock_irqsave(&ha->hardware_lock, flags); 2582 ha->pci_attr = rd_reg_word(®->ctrl_status); 2583 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2584 2585 return QLA_SUCCESS; 2586 } 2587 2588 /** 2589 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers. 2590 * @vha: HA context 2591 * 2592 * Returns 0 on success. 2593 */ 2594 int 2595 qla24xx_pci_config(scsi_qla_host_t *vha) 2596 { 2597 uint16_t w; 2598 unsigned long flags = 0; 2599 struct qla_hw_data *ha = vha->hw; 2600 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2601 2602 pci_set_master(ha->pdev); 2603 pci_try_set_mwi(ha->pdev); 2604 2605 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2606 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2607 w &= ~PCI_COMMAND_INTX_DISABLE; 2608 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2609 2610 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 2611 2612 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ 2613 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) 2614 pcix_set_mmrbc(ha->pdev, 2048); 2615 2616 /* PCIe -- adjust Maximum Read Request Size (2048). */ 2617 if (pci_is_pcie(ha->pdev)) 2618 pcie_set_readrq(ha->pdev, 4096); 2619 2620 pci_disable_rom(ha->pdev); 2621 2622 ha->chip_revision = ha->pdev->revision; 2623 2624 /* Get PCI bus information. */ 2625 spin_lock_irqsave(&ha->hardware_lock, flags); 2626 ha->pci_attr = rd_reg_dword(®->ctrl_status); 2627 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2628 2629 return QLA_SUCCESS; 2630 } 2631 2632 /** 2633 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers. 2634 * @vha: HA context 2635 * 2636 * Returns 0 on success. 
2637 */ 2638 int 2639 qla25xx_pci_config(scsi_qla_host_t *vha) 2640 { 2641 uint16_t w; 2642 struct qla_hw_data *ha = vha->hw; 2643 2644 pci_set_master(ha->pdev); 2645 pci_try_set_mwi(ha->pdev); 2646 2647 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2648 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2649 w &= ~PCI_COMMAND_INTX_DISABLE; 2650 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2651 2652 /* PCIe -- adjust Maximum Read Request Size (2048). */ 2653 if (pci_is_pcie(ha->pdev)) 2654 pcie_set_readrq(ha->pdev, 4096); 2655 2656 pci_disable_rom(ha->pdev); 2657 2658 ha->chip_revision = ha->pdev->revision; 2659 2660 return QLA_SUCCESS; 2661 } 2662 2663 /** 2664 * qla2x00_isp_firmware() - Choose firmware image. 2665 * @vha: HA context 2666 * 2667 * Returns 0 on success. 2668 */ 2669 static int 2670 qla2x00_isp_firmware(scsi_qla_host_t *vha) 2671 { 2672 int rval; 2673 uint16_t loop_id, topo, sw_cap; 2674 uint8_t domain, area, al_pa; 2675 struct qla_hw_data *ha = vha->hw; 2676 2677 /* Assume loading risc code */ 2678 rval = QLA_FUNCTION_FAILED; 2679 2680 if (ha->flags.disable_risc_code_load) { 2681 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n"); 2682 2683 /* Verify checksum of loaded RISC code. */ 2684 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 2685 if (rval == QLA_SUCCESS) { 2686 /* And, verify we are not in ROM code. */ 2687 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, 2688 &area, &domain, &topo, &sw_cap); 2689 } 2690 } 2691 2692 if (rval) 2693 ql_dbg(ql_dbg_init, vha, 0x007a, 2694 "**** Load RISC code ****.\n"); 2695 2696 return (rval); 2697 } 2698 2699 /** 2700 * qla2x00_reset_chip() - Reset ISP chip. 2701 * @vha: HA context 2702 * 2703 * Returns 0 on success. 
2704 */ 2705 int 2706 qla2x00_reset_chip(scsi_qla_host_t *vha) 2707 { 2708 unsigned long flags = 0; 2709 struct qla_hw_data *ha = vha->hw; 2710 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2711 uint32_t cnt; 2712 uint16_t cmd; 2713 int rval = QLA_FUNCTION_FAILED; 2714 2715 if (unlikely(pci_channel_offline(ha->pdev))) 2716 return rval; 2717 2718 ha->isp_ops->disable_intrs(ha); 2719 2720 spin_lock_irqsave(&ha->hardware_lock, flags); 2721 2722 /* Turn off master enable */ 2723 cmd = 0; 2724 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); 2725 cmd &= ~PCI_COMMAND_MASTER; 2726 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 2727 2728 if (!IS_QLA2100(ha)) { 2729 /* Pause RISC. */ 2730 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); 2731 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { 2732 for (cnt = 0; cnt < 30000; cnt++) { 2733 if ((rd_reg_word(®->hccr) & 2734 HCCR_RISC_PAUSE) != 0) 2735 break; 2736 udelay(100); 2737 } 2738 } else { 2739 rd_reg_word(®->hccr); /* PCI Posting. */ 2740 udelay(10); 2741 } 2742 2743 /* Select FPM registers. */ 2744 wrt_reg_word(®->ctrl_status, 0x20); 2745 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2746 2747 /* FPM Soft Reset. */ 2748 wrt_reg_word(®->fpm_diag_config, 0x100); 2749 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ 2750 2751 /* Toggle Fpm Reset. */ 2752 if (!IS_QLA2200(ha)) { 2753 wrt_reg_word(®->fpm_diag_config, 0x0); 2754 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ 2755 } 2756 2757 /* Select frame buffer registers. */ 2758 wrt_reg_word(®->ctrl_status, 0x10); 2759 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2760 2761 /* Reset frame buffer FIFOs. */ 2762 if (IS_QLA2200(ha)) { 2763 WRT_FB_CMD_REG(ha, reg, 0xa000); 2764 RD_FB_CMD_REG(ha, reg); /* PCI Posting. 
*/ 2765 } else { 2766 WRT_FB_CMD_REG(ha, reg, 0x00fc); 2767 2768 /* Read back fb_cmd until zero or 3 seconds max */ 2769 for (cnt = 0; cnt < 3000; cnt++) { 2770 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) 2771 break; 2772 udelay(100); 2773 } 2774 } 2775 2776 /* Select RISC module registers. */ 2777 wrt_reg_word(®->ctrl_status, 0); 2778 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2779 2780 /* Reset RISC processor. */ 2781 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 2782 rd_reg_word(®->hccr); /* PCI Posting. */ 2783 2784 /* Release RISC processor. */ 2785 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2786 rd_reg_word(®->hccr); /* PCI Posting. */ 2787 } 2788 2789 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 2790 wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT); 2791 2792 /* Reset ISP chip. */ 2793 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); 2794 2795 /* Wait for RISC to recover from reset. */ 2796 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 2797 /* 2798 * It is necessary to for a delay here since the card doesn't 2799 * respond to PCI reads during a reset. On some architectures 2800 * this will result in an MCA. 2801 */ 2802 udelay(20); 2803 for (cnt = 30000; cnt; cnt--) { 2804 if ((rd_reg_word(®->ctrl_status) & 2805 CSR_ISP_SOFT_RESET) == 0) 2806 break; 2807 udelay(100); 2808 } 2809 } else 2810 udelay(10); 2811 2812 /* Reset RISC processor. */ 2813 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 2814 2815 wrt_reg_word(®->semaphore, 0); 2816 2817 /* Release RISC processor. */ 2818 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2819 rd_reg_word(®->hccr); /* PCI Posting. */ 2820 2821 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 2822 for (cnt = 0; cnt < 30000; cnt++) { 2823 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) 2824 break; 2825 2826 udelay(100); 2827 } 2828 } else 2829 udelay(100); 2830 2831 /* Turn on master enable */ 2832 cmd |= PCI_COMMAND_MASTER; 2833 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 2834 2835 /* Disable RISC pause on FPM parity error. 
*/ 2836 if (!IS_QLA2100(ha)) { 2837 wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE); 2838 rd_reg_word(®->hccr); /* PCI Posting. */ 2839 } 2840 2841 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2842 2843 return QLA_SUCCESS; 2844 } 2845 2846 /** 2847 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. 2848 * @vha: HA context 2849 * 2850 * Returns 0 on success. 2851 */ 2852 static int 2853 qla81xx_reset_mpi(scsi_qla_host_t *vha) 2854 { 2855 uint16_t mb[4] = {0x1010, 0, 1, 0}; 2856 2857 if (!IS_QLA81XX(vha->hw)) 2858 return QLA_SUCCESS; 2859 2860 return qla81xx_write_mpi_register(vha, mb); 2861 } 2862 2863 static int 2864 qla_chk_risc_recovery(scsi_qla_host_t *vha) 2865 { 2866 struct qla_hw_data *ha = vha->hw; 2867 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2868 __le16 __iomem *mbptr = ®->mailbox0; 2869 int i; 2870 u16 mb[32]; 2871 int rc = QLA_SUCCESS; 2872 2873 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2874 return rc; 2875 2876 /* this check is only valid after RISC reset */ 2877 mb[0] = rd_reg_word(mbptr); 2878 mbptr++; 2879 if (mb[0] == 0xf) { 2880 rc = QLA_FUNCTION_FAILED; 2881 2882 for (i = 1; i < 32; i++) { 2883 mb[i] = rd_reg_word(mbptr); 2884 mbptr++; 2885 } 2886 2887 ql_log(ql_log_warn, vha, 0x1015, 2888 "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2889 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]); 2890 ql_log(ql_log_warn, vha, 0x1015, 2891 "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2892 mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14], 2893 mb[15]); 2894 ql_log(ql_log_warn, vha, 0x1015, 2895 "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2896 mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22], 2897 mb[23]); 2898 ql_log(ql_log_warn, vha, 0x1015, 2899 "RISC reset failed. 
mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2900 mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30], 2901 mb[31]); 2902 } 2903 return rc; 2904 } 2905 2906 /** 2907 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 2908 * @vha: HA context 2909 * 2910 * Returns 0 on success. 2911 */ 2912 static inline int 2913 qla24xx_reset_risc(scsi_qla_host_t *vha) 2914 { 2915 unsigned long flags = 0; 2916 struct qla_hw_data *ha = vha->hw; 2917 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2918 uint32_t cnt; 2919 uint16_t wd; 2920 static int abts_cnt; /* ISP abort retry counts */ 2921 int rval = QLA_SUCCESS; 2922 int print = 1; 2923 2924 spin_lock_irqsave(&ha->hardware_lock, flags); 2925 2926 /* Reset RISC. */ 2927 wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 2928 for (cnt = 0; cnt < 30000; cnt++) { 2929 if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) 2930 break; 2931 2932 udelay(10); 2933 } 2934 2935 if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) 2936 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); 2937 2938 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, 2939 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", 2940 rd_reg_dword(®->hccr), 2941 rd_reg_dword(®->ctrl_status), 2942 (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)); 2943 2944 wrt_reg_dword(®->ctrl_status, 2945 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 2946 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 2947 2948 udelay(100); 2949 2950 /* Wait for firmware to complete NVRAM accesses. 
*/ 2951 rd_reg_word(®->mailbox0); 2952 for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && 2953 rval == QLA_SUCCESS; cnt--) { 2954 barrier(); 2955 if (cnt) 2956 udelay(5); 2957 else 2958 rval = QLA_FUNCTION_TIMEOUT; 2959 } 2960 2961 if (rval == QLA_SUCCESS) 2962 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); 2963 2964 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, 2965 "HCCR: 0x%x, MailBox0 Status 0x%x\n", 2966 rd_reg_dword(®->hccr), 2967 rd_reg_word(®->mailbox0)); 2968 2969 /* Wait for soft-reset to complete. */ 2970 rd_reg_dword(®->ctrl_status); 2971 for (cnt = 0; cnt < 60; cnt++) { 2972 barrier(); 2973 if ((rd_reg_dword(®->ctrl_status) & 2974 CSRX_ISP_SOFT_RESET) == 0) 2975 break; 2976 2977 udelay(5); 2978 } 2979 if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) 2980 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); 2981 2982 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, 2983 "HCCR: 0x%x, Soft Reset status: 0x%x\n", 2984 rd_reg_dword(®->hccr), 2985 rd_reg_dword(®->ctrl_status)); 2986 2987 /* If required, do an MPI FW reset now */ 2988 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { 2989 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { 2990 if (++abts_cnt < 5) { 2991 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2992 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); 2993 } else { 2994 /* 2995 * We exhausted the ISP abort retries. We have to 2996 * set the board offline. 
2997 */ 2998 abts_cnt = 0; 2999 vha->flags.online = 0; 3000 } 3001 } 3002 } 3003 3004 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); 3005 rd_reg_dword(®->hccr); 3006 3007 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); 3008 rd_reg_dword(®->hccr); 3009 3010 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); 3011 mdelay(10); 3012 rd_reg_dword(®->hccr); 3013 3014 wd = rd_reg_word(®->mailbox0); 3015 for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) { 3016 barrier(); 3017 if (cnt) { 3018 mdelay(1); 3019 if (print && qla_chk_risc_recovery(vha)) 3020 print = 0; 3021 3022 wd = rd_reg_word(®->mailbox0); 3023 } else { 3024 rval = QLA_FUNCTION_TIMEOUT; 3025 3026 ql_log(ql_log_warn, vha, 0x015e, 3027 "RISC reset timeout\n"); 3028 } 3029 } 3030 3031 if (rval == QLA_SUCCESS) 3032 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); 3033 3034 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, 3035 "Host Risc 0x%x, mailbox0 0x%x\n", 3036 rd_reg_dword(®->hccr), 3037 rd_reg_word(®->mailbox0)); 3038 3039 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3040 3041 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f, 3042 "Driver in %s mode\n", 3043 IS_NOPOLLING_TYPE(ha) ? 
"Interrupt" : "Polling"); 3044 3045 if (IS_NOPOLLING_TYPE(ha)) 3046 ha->isp_ops->enable_intrs(ha); 3047 3048 return rval; 3049 } 3050 3051 static void 3052 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) 3053 { 3054 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; 3055 3056 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); 3057 *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); 3058 } 3059 3060 static void 3061 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) 3062 { 3063 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; 3064 3065 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); 3066 wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); 3067 } 3068 3069 static void 3070 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) 3071 { 3072 uint32_t wd32 = 0; 3073 uint delta_msec = 100; 3074 uint elapsed_msec = 0; 3075 uint timeout_msec; 3076 ulong n; 3077 3078 if (vha->hw->pdev->subsystem_device != 0x0175 && 3079 vha->hw->pdev->subsystem_device != 0x0240) 3080 return; 3081 3082 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); 3083 udelay(100); 3084 3085 attempt: 3086 timeout_msec = TIMEOUT_SEMAPHORE; 3087 n = timeout_msec / delta_msec; 3088 while (n--) { 3089 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET); 3090 qla25xx_read_risc_sema_reg(vha, &wd32); 3091 if (wd32 & RISC_SEMAPHORE) 3092 break; 3093 msleep(delta_msec); 3094 elapsed_msec += delta_msec; 3095 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) 3096 goto force; 3097 } 3098 3099 if (!(wd32 & RISC_SEMAPHORE)) 3100 goto force; 3101 3102 if (!(wd32 & RISC_SEMAPHORE_FORCE)) 3103 goto acquired; 3104 3105 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR); 3106 timeout_msec = TIMEOUT_SEMAPHORE_FORCE; 3107 n = timeout_msec / delta_msec; 3108 while (n--) { 3109 qla25xx_read_risc_sema_reg(vha, &wd32); 3110 if (!(wd32 & RISC_SEMAPHORE_FORCE)) 3111 break; 3112 msleep(delta_msec); 3113 
elapsed_msec += delta_msec; 3114 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) 3115 goto force; 3116 } 3117 3118 if (wd32 & RISC_SEMAPHORE_FORCE) 3119 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); 3120 3121 goto attempt; 3122 3123 force: 3124 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); 3125 3126 acquired: 3127 return; 3128 } 3129 3130 /** 3131 * qla24xx_reset_chip() - Reset ISP24xx chip. 3132 * @vha: HA context 3133 * 3134 * Returns 0 on success. 3135 */ 3136 int 3137 qla24xx_reset_chip(scsi_qla_host_t *vha) 3138 { 3139 struct qla_hw_data *ha = vha->hw; 3140 int rval = QLA_FUNCTION_FAILED; 3141 3142 if (pci_channel_offline(ha->pdev) && 3143 ha->flags.pci_channel_io_perm_failure) { 3144 return rval; 3145 } 3146 3147 ha->isp_ops->disable_intrs(ha); 3148 3149 qla25xx_manipulate_risc_semaphore(vha); 3150 3151 /* Perform RISC reset. */ 3152 rval = qla24xx_reset_risc(vha); 3153 3154 return rval; 3155 } 3156 3157 /** 3158 * qla2x00_chip_diag() - Test chip for proper operation. 3159 * @vha: HA context 3160 * 3161 * Returns 0 on success. 3162 */ 3163 int 3164 qla2x00_chip_diag(scsi_qla_host_t *vha) 3165 { 3166 int rval; 3167 struct qla_hw_data *ha = vha->hw; 3168 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3169 unsigned long flags = 0; 3170 uint16_t data; 3171 uint32_t cnt; 3172 uint16_t mb[5]; 3173 struct req_que *req = ha->req_q_map[0]; 3174 3175 /* Assume a failed state */ 3176 rval = QLA_FUNCTION_FAILED; 3177 3178 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n", 3179 ®->flash_address); 3180 3181 spin_lock_irqsave(&ha->hardware_lock, flags); 3182 3183 /* Reset ISP chip. */ 3184 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); 3185 3186 /* 3187 * We need to have a delay here since the card will not respond while 3188 * in reset causing an MCA on some architectures. 
3189 */ 3190 udelay(20); 3191 data = qla2x00_debounce_register(®->ctrl_status); 3192 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { 3193 udelay(5); 3194 data = rd_reg_word(®->ctrl_status); 3195 barrier(); 3196 } 3197 3198 if (!cnt) 3199 goto chip_diag_failed; 3200 3201 ql_dbg(ql_dbg_init, vha, 0x007c, 3202 "Reset register cleared by chip reset.\n"); 3203 3204 /* Reset RISC processor. */ 3205 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 3206 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 3207 3208 /* Workaround for QLA2312 PCI parity error */ 3209 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 3210 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); 3211 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { 3212 udelay(5); 3213 data = RD_MAILBOX_REG(ha, reg, 0); 3214 barrier(); 3215 } 3216 } else 3217 udelay(10); 3218 3219 if (!cnt) 3220 goto chip_diag_failed; 3221 3222 /* Check product ID of chip */ 3223 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); 3224 3225 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 3226 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 3227 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 3228 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 3229 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 3230 mb[3] != PROD_ID_3) { 3231 ql_log(ql_log_warn, vha, 0x0062, 3232 "Wrong product ID = 0x%x,0x%x,0x%x.\n", 3233 mb[1], mb[2], mb[3]); 3234 3235 goto chip_diag_failed; 3236 } 3237 ha->product_id[0] = mb[1]; 3238 ha->product_id[1] = mb[2]; 3239 ha->product_id[2] = mb[3]; 3240 ha->product_id[3] = mb[4]; 3241 3242 /* Adjust fw RISC transfer size */ 3243 if (req->length > 1024) 3244 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 3245 else 3246 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 3247 req->length; 3248 3249 if (IS_QLA2200(ha) && 3250 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 3251 /* Limit firmware transfer size with a 2200A */ 3252 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A 
Chip.\n"); 3253 3254 ha->device_type |= DT_ISP2200A; 3255 ha->fw_transfer_size = 128; 3256 } 3257 3258 /* Wrap Incoming Mailboxes Test. */ 3259 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3260 3261 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); 3262 rval = qla2x00_mbx_reg_test(vha); 3263 if (rval) 3264 ql_log(ql_log_warn, vha, 0x0080, 3265 "Failed mailbox send register test.\n"); 3266 else 3267 /* Flag a successful rval */ 3268 rval = QLA_SUCCESS; 3269 spin_lock_irqsave(&ha->hardware_lock, flags); 3270 3271 chip_diag_failed: 3272 if (rval) 3273 ql_log(ql_log_info, vha, 0x0081, 3274 "Chip diagnostics **** FAILED ****.\n"); 3275 3276 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3277 3278 return (rval); 3279 } 3280 3281 /** 3282 * qla24xx_chip_diag() - Test ISP24xx for proper operation. 3283 * @vha: HA context 3284 * 3285 * Returns 0 on success. 3286 */ 3287 int 3288 qla24xx_chip_diag(scsi_qla_host_t *vha) 3289 { 3290 int rval; 3291 struct qla_hw_data *ha = vha->hw; 3292 struct req_que *req = ha->req_q_map[0]; 3293 3294 if (IS_P3P_TYPE(ha)) 3295 return QLA_SUCCESS; 3296 3297 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 3298 3299 rval = qla2x00_mbx_reg_test(vha); 3300 if (rval) { 3301 ql_log(ql_log_warn, vha, 0x0082, 3302 "Failed mailbox send register test.\n"); 3303 } else { 3304 /* Flag a successful rval */ 3305 rval = QLA_SUCCESS; 3306 } 3307 3308 return rval; 3309 } 3310 3311 static void 3312 qla2x00_init_fce_trace(scsi_qla_host_t *vha) 3313 { 3314 int rval; 3315 dma_addr_t tc_dma; 3316 void *tc; 3317 struct qla_hw_data *ha = vha->hw; 3318 3319 if (!IS_FWI2_CAPABLE(ha)) 3320 return; 3321 3322 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3323 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3324 return; 3325 3326 if (ha->fce) { 3327 ql_dbg(ql_dbg_init, vha, 0x00bd, 3328 "%s: FCE Mem is already allocated.\n", 3329 __func__); 3330 return; 3331 } 3332 3333 /* Allocate memory for Fibre Channel Event Buffer. 
*/ 3334 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3335 GFP_KERNEL); 3336 if (!tc) { 3337 ql_log(ql_log_warn, vha, 0x00be, 3338 "Unable to allocate (%d KB) for FCE.\n", 3339 FCE_SIZE / 1024); 3340 return; 3341 } 3342 3343 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 3344 ha->fce_mb, &ha->fce_bufs); 3345 if (rval) { 3346 ql_log(ql_log_warn, vha, 0x00bf, 3347 "Unable to initialize FCE (%d).\n", rval); 3348 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); 3349 return; 3350 } 3351 3352 ql_dbg(ql_dbg_init, vha, 0x00c0, 3353 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); 3354 3355 ha->flags.fce_enabled = 1; 3356 ha->fce_dma = tc_dma; 3357 ha->fce = tc; 3358 } 3359 3360 static void 3361 qla2x00_init_eft_trace(scsi_qla_host_t *vha) 3362 { 3363 int rval; 3364 dma_addr_t tc_dma; 3365 void *tc; 3366 struct qla_hw_data *ha = vha->hw; 3367 3368 if (!IS_FWI2_CAPABLE(ha)) 3369 return; 3370 3371 if (ha->eft) { 3372 ql_dbg(ql_dbg_init, vha, 0x00bd, 3373 "%s: EFT Mem is already allocated.\n", 3374 __func__); 3375 return; 3376 } 3377 3378 /* Allocate memory for Extended Trace Buffer. 
 */
	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
	    GFP_KERNEL);
	if (!tc) {
		ql_log(ql_log_warn, vha, 0x00c1,
		    "Unable to allocate (%d KB) for EFT.\n",
		    EFT_SIZE / 1024);
		return;
	}

	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x00c2,
		    "Unable to initialize EFT (%d).\n", rval);
		/* Firmware rejected the buffer -- return the DMA memory. */
		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
		return;
	}

	ql_dbg(ql_dbg_init, vha, 0x00c3,
	    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

	/* Publish the buffer only after firmware accepted it. */
	ha->eft_dma = tc_dma;
	ha->eft = tc;
}

/*
 * Set up the optional firmware offload/trace facilities (FCE and EFT).
 * Each helper is a no-op when the adapter does not support the feature
 * or the buffer already exists.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}

/*
 * Compute the firmware-dump size for this ISP generation and (re)allocate
 * the vmalloc'd dump buffer.  ISP27xx/28xx sizes come from the firmware
 * dump templates; older chips use fixed per-chip header offsets plus the
 * sizes of queue, trace and offload regions.  The buffer is only grown,
 * never shrunk, and a dump already captured is preserved across the
 * reallocation.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		/* 23xx external RAM starts at 0x11000, 16-bit words. */
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		/* FWI2 external RAM starts at 0x100000, 32-bit words. */
		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
			    !IS_QLA28XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/* Trace buffers only contribute if firmware accepted them. */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* Template-driven dump size: sum both fwdt templates. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		/* Chains (MQ/FCE/offload) are appended after this offset. */
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;
		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exlogin_size;
	}

	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {

		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			/* optrom_mutex serializes against dump consumers. */
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len =  dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/*
					 * Template-based dumps carry no
					 * legacy header; only record where
					 * the spare MPI dump area lives.
					 */
					ha->mpi_fw_dump = (char *)fw_dump +
					    ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Legacy dump header (big-endian fields). */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
				    htonl(offsetof
					(struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}

/*
 * Synchronize the adapter's MPS setting (RAM word 0x7a15) with the copy
 * held in PCI config space offset 0x54, under the firmware semaphore at
 * RAM word 0x7c00.  ISP81xx only; other chips return QLA_SUCCESS.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore before touching MPS. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Always drop the semaphore, even on earlier failure. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}

/*
 * Size and allocate the per-request-queue outstanding-command array
 * based on the firmware's exchange/IOCB counts, falling back to a
 * minimal array so initialization can proceed under memory pressure.
 */
int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
{
	/* Don't try to reallocate the array */
	if (req->outstanding_cmds)
		return QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(ha))
		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
	else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 3625 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 3626 else 3627 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 3628 } 3629 3630 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3631 sizeof(srb_t *), 3632 GFP_KERNEL); 3633 3634 if (!req->outstanding_cmds) { 3635 /* 3636 * Try to allocate a minimal size just so we can get through 3637 * initialization. 3638 */ 3639 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 3640 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3641 sizeof(srb_t *), 3642 GFP_KERNEL); 3643 3644 if (!req->outstanding_cmds) { 3645 ql_log(ql_log_fatal, NULL, 0x0126, 3646 "Failed to allocate memory for " 3647 "outstanding_cmds for req_que %p.\n", req); 3648 req->num_outstanding_cmds = 0; 3649 return QLA_FUNCTION_FAILED; 3650 } 3651 } 3652 3653 return QLA_SUCCESS; 3654 } 3655 3656 #define PRINT_FIELD(_field, _flag, _str) { \ 3657 if (a0->_field & _flag) {\ 3658 if (p) {\ 3659 strcat(ptr, "|");\ 3660 ptr++;\ 3661 leftover--;\ 3662 } \ 3663 len = snprintf(ptr, leftover, "%s", _str); \ 3664 p = 1;\ 3665 leftover -= len;\ 3666 ptr += len; \ 3667 } \ 3668 } 3669 3670 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 3671 { 3672 #define STR_LEN 64 3673 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 3674 u8 str[STR_LEN], *ptr, p; 3675 int leftover, len; 3676 3677 memset(str, 0, STR_LEN); 3678 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 3679 ql_dbg(ql_dbg_init, vha, 0x015a, 3680 "SFP MFG Name: %s\n", str); 3681 3682 memset(str, 0, STR_LEN); 3683 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 3684 ql_dbg(ql_dbg_init, vha, 0x015c, 3685 "SFP Part Name: %s\n", str); 3686 3687 /* media */ 3688 memset(str, 0, STR_LEN); 3689 ptr = str; 3690 leftover = STR_LEN; 3691 p = len = 0; 3692 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 3693 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 3694 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 
3695 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 3696 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 3697 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 3698 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 3699 ql_dbg(ql_dbg_init, vha, 0x0160, 3700 "SFP Media: %s\n", str); 3701 3702 /* link length */ 3703 memset(str, 0, STR_LEN); 3704 ptr = str; 3705 leftover = STR_LEN; 3706 p = len = 0; 3707 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 3708 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 3709 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 3710 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 3711 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 3712 ql_dbg(ql_dbg_init, vha, 0x0196, 3713 "SFP Link Length: %s\n", str); 3714 3715 memset(str, 0, STR_LEN); 3716 ptr = str; 3717 leftover = STR_LEN; 3718 p = len = 0; 3719 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 3720 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 3721 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 3722 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 3723 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 3724 ql_dbg(ql_dbg_init, vha, 0x016e, 3725 "SFP FC Link Tech: %s\n", str); 3726 3727 if (a0->length_km) 3728 ql_dbg(ql_dbg_init, vha, 0x016f, 3729 "SFP Distant: %d km\n", a0->length_km); 3730 if (a0->length_100m) 3731 ql_dbg(ql_dbg_init, vha, 0x0170, 3732 "SFP Distant: %d m\n", a0->length_100m*100); 3733 if (a0->length_50um_10m) 3734 ql_dbg(ql_dbg_init, vha, 0x0189, 3735 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 3736 if (a0->length_62um_10m) 3737 ql_dbg(ql_dbg_init, vha, 0x018a, 3738 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); 3739 if (a0->length_om4_10m) 3740 ql_dbg(ql_dbg_init, vha, 0x0194, 3741 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); 3742 if (a0->length_om3_10m) 3743 ql_dbg(ql_dbg_init, vha, 0x0195, 3744 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); 3745 } 3746 3747 3748 /** 3749 * qla24xx_detect_sfp() 
 *
 * @vha: adapter state pointer.
 *
 * @return
 *	0 -- Configure firmware to use short-range settings -- normal
 *	     buffer-to-buffer credits.
 *
 *	1 -- Configure firmware to use long-range settings -- extra
 *	     buffer-to-buffer credits should be allocated with
 *	     ha->lr_distance containing distance settings from NVRAM or SFP
 *	     (if supported).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN	2
	static const char * const types[] = { "Short", "Long" };
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		     & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	/* SFP data is authoritative; override the NVRAM seed. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	       lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}

/*
 * Distribute the firmware IOCB budget across the base queue pair and all
 * configured queue pairs: every qpair sees the full IOCB total, a global
 * limit of QLA_IOCB_PCT_LIMIT percent, and an equal per-qpair share.
 */
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	u16 i, num_qps;
	u32 limit;
	struct qla_hw_data *ha = vha->hw;

	/* +1 accounts for the base queue pair. */
	num_qps = ha->num_qpairs + 1;
	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
	ha->base_qpair->fwres.iocbs_limit = limit;
	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
	ha->base_qpair->fwres.iocbs_used = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->fwres.iocbs_total =
				ha->orig_fw_iocb_count;
			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
				limit / num_qps;
			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
		}
	}
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	int done_once = 0;

	if (IS_P3P_TYPE(ha)) {
		/* P3P (82xx) parts load firmware out of band. */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.
		 */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);	/* flush the write (PCI posting) */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/*
				 * Enable BPM support?  A long-range SFP
				 * forces one firmware restart (done_once
				 * guards against looping).
				 */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Keep vport count a multiple of
					 * MIN_MULTI_ID_FABRIC (minus self). */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f ||
		    ha->flags.edif_enabled)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);	/* flush the write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Firmware reports size in 4-byte words. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				/* Missing FAC is non-fatal on these parts. */
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Mark every entry consumed so stale entries are never processed. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* 2100/2200 have no serial-link or LED options to program. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * FWI2 (24xx-and-later) variant of update_fw_options: program ABTS
 * handling, ATIO routing, exchange tracking, PUREX and serdes options
 * based on module parameters and negotiated features.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only issue the mailbox command when something is actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

/*
 * Program queue-0 ring parameters into the init control block and zero
 * the request/response queue in/out pointer registers (pre-FWI2 parts).
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block.
 */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

/*
 * FWI2 variant of config_rings: fill the 24xx init control block with
 * queue-0 request/response/ATIO ring parameters, program MQ/MSI-X
 * options where applicable, and zero the queue pointer registers.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; handles start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware.
 */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-Port and FA-WWPN support are advertised via
		 * firmware_options_1 of the init control block. */
		ha->flags.dport_enabled =
			(le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
			 BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
			(le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
			 BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
4527 */ 4528 if (time_after_eq(jiffies, mtime)) { 4529 ql_log(ql_log_info, vha, 0x8038, 4530 "Cable is unplugged...\n"); 4531 4532 vha->device_flags |= DFLG_NO_CABLE; 4533 break; 4534 } 4535 } 4536 } else { 4537 /* Mailbox cmd failed. Timeout on min_wait. */ 4538 if (time_after_eq(jiffies, mtime) || 4539 ha->flags.isp82xx_fw_hung) 4540 break; 4541 } 4542 4543 if (time_after_eq(jiffies, wtime)) 4544 break; 4545 4546 /* Delay for a while */ 4547 msleep(500); 4548 } while (1); 4549 4550 ql_dbg(ql_dbg_taskm, vha, 0x803a, 4551 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0], 4552 state[1], state[2], state[3], state[4], state[5], jiffies); 4553 4554 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { 4555 ql_log(ql_log_warn, vha, 0x803b, 4556 "Firmware ready **** FAILED ****.\n"); 4557 } 4558 4559 return (rval); 4560 } 4561 4562 /* 4563 * qla2x00_configure_hba 4564 * Setup adapter context. 4565 * 4566 * Input: 4567 * ha = adapter state pointer. 4568 * 4569 * Returns: 4570 * 0 = success 4571 * 4572 * Context: 4573 * Kernel context. 4574 */ 4575 static int 4576 qla2x00_configure_hba(scsi_qla_host_t *vha) 4577 { 4578 int rval; 4579 uint16_t loop_id; 4580 uint16_t topo; 4581 uint16_t sw_cap; 4582 uint8_t al_pa; 4583 uint8_t area; 4584 uint8_t domain; 4585 char connect_type[22]; 4586 struct qla_hw_data *ha = vha->hw; 4587 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4588 port_id_t id; 4589 unsigned long flags; 4590 4591 /* Get host addresses. 
	 */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* loop_id 0x7 with COMMAND_ERROR indicates a transient state. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/* Only the base port on FWI2 HBAs may retry via link init. */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo 4 means topology not yet resolved; caller will retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->switch_cap = 0;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* In N2N with the bigger-WWPN side, the map is updated elsewhere. */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

/*
 * qla2x00_set_model_info
 *	Record the HBA model number/description, preferring the value read
 *	from NVRAM (@model, trailing spaces/NULs stripped); fall back to the
 *	qla_devtbl lookup table or @def.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
    const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	/* Legacy parts use the model-name table; newer parts carry VPD. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		memcpy(ha->model_number, model, len);
		/* Strip trailing spaces and NULs. */
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strlcpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	/* VPD tag 0x82 (ASCII identifier) overrides the description if present. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.  No-op on other architectures.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
* NVRAM configuration for ISP 2xxx
*
* Input:
*      ha                = adapter block pointer.
*
* Output:
*      initialization control block in response_ring
*      host adapters parameters in host adapter block
*
* Returns:
*      0 = success.
*/
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int             rval;
	uint8_t         chksum = 0;
	uint16_t        cnt;
	uint8_t         *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t       *icb = ha->init_cb;
	nvram_t         *nv = ha->nvram;
	uint8_t         *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum.
	 */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	/* A valid NVRAM image sums to zero (mod 256). */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Fabricate a recognizable (but invalid) default WWPN. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero return tells the caller defaults were used. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half.
	 */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla2x00_rport_del
 *	Detach and delete the fc_rport associated with @data (an fc_port_t).
 *	Prefers the deferred rport (drport) if one is pending.
 */
static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	/* Snapshot and clear the rport pointers under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport : fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	if (rport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
		    "%s %8phN. rport %p roles %x\n",
		    __func__, fcport->port_name, rport,
		    rport->roles);

		fc_remote_port_delete(rport);
	}
}

/*
 * qla2x00_set_fcport_state
 *	Atomically set @fcport's state and log the transition.
 */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	}
}

/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
5110 * @vha: HA context 5111 * @flags: allocation flags 5112 * 5113 * Returns a pointer to the allocated fcport, or NULL, if none available. 5114 */ 5115 fc_port_t * 5116 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 5117 { 5118 fc_port_t *fcport; 5119 5120 fcport = kzalloc(sizeof(fc_port_t), flags); 5121 if (!fcport) 5122 return NULL; 5123 5124 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 5125 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 5126 flags); 5127 if (!fcport->ct_desc.ct_sns) { 5128 ql_log(ql_log_warn, vha, 0xd049, 5129 "Failed to allocate ct_sns request.\n"); 5130 kfree(fcport); 5131 return NULL; 5132 } 5133 5134 /* Setup fcport template structure. */ 5135 fcport->vha = vha; 5136 fcport->port_type = FCT_UNKNOWN; 5137 fcport->loop_id = FC_NO_LOOP_ID; 5138 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 5139 fcport->supported_classes = FC_COS_UNSPECIFIED; 5140 fcport->fp_speed = PORT_SPEED_UNKNOWN; 5141 5142 fcport->disc_state = DSC_DELETED; 5143 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 5144 fcport->deleted = QLA_SESS_DELETED; 5145 fcport->login_retry = vha->hw->login_retry_count; 5146 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5147 fcport->logout_on_delete = 1; 5148 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 5149 fcport->tgt_short_link_down_cnt = 0; 5150 fcport->dev_loss_tmo = 0; 5151 5152 if (!fcport->ct_desc.ct_sns) { 5153 ql_log(ql_log_warn, vha, 0xd049, 5154 "Failed to allocate ct_sns request.\n"); 5155 kfree(fcport); 5156 return NULL; 5157 } 5158 5159 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 5160 INIT_WORK(&fcport->free_work, qlt_free_session_done); 5161 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); 5162 INIT_LIST_HEAD(&fcport->gnl_entry); 5163 INIT_LIST_HEAD(&fcport->list); 5164 5165 INIT_LIST_HEAD(&fcport->sess_cmd_list); 5166 spin_lock_init(&fcport->sess_cmd_lock); 5167 5168 spin_lock_init(&fcport->edif.sa_list_lock); 5169 
INIT_LIST_HEAD(&fcport->edif.tx_sa_list); 5170 INIT_LIST_HEAD(&fcport->edif.rx_sa_list); 5171 5172 if (vha->e_dbell.db_flags == EDB_ACTIVE) 5173 fcport->edif.app_started = 1; 5174 5175 spin_lock_init(&fcport->edif.indx_list_lock); 5176 INIT_LIST_HEAD(&fcport->edif.edif_indx_list); 5177 5178 return fcport; 5179 } 5180 5181 void 5182 qla2x00_free_fcport(fc_port_t *fcport) 5183 { 5184 if (fcport->ct_desc.ct_sns) { 5185 dma_free_coherent(&fcport->vha->hw->pdev->dev, 5186 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 5187 fcport->ct_desc.ct_sns_dma); 5188 5189 fcport->ct_desc.ct_sns = NULL; 5190 } 5191 5192 qla_edif_flush_sa_ctl_lists(fcport); 5193 list_del(&fcport->list); 5194 qla2x00_clear_loop_id(fcport); 5195 5196 qla_edif_list_del(fcport); 5197 5198 kfree(fcport); 5199 } 5200 5201 static void qla_get_login_template(scsi_qla_host_t *vha) 5202 { 5203 struct qla_hw_data *ha = vha->hw; 5204 int rval; 5205 u32 *bp, sz; 5206 __be32 *q; 5207 5208 memset(ha->init_cb, 0, ha->init_cb_size); 5209 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); 5210 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, 5211 ha->init_cb, sz); 5212 if (rval != QLA_SUCCESS) { 5213 ql_dbg(ql_dbg_init, vha, 0x00d1, 5214 "PLOGI ELS param read fail.\n"); 5215 return; 5216 } 5217 q = (__be32 *)&ha->plogi_els_payld.fl_csp; 5218 5219 bp = (uint32_t *)ha->init_cb; 5220 cpu_to_be32_array(q, bp, sz / 4); 5221 ha->flags.plogi_template_valid = 1; 5222 } 5223 5224 /* 5225 * qla2x00_configure_loop 5226 * Updates Fibre Channel Device Database with what is actually on loop. 5227 * 5228 * Input: 5229 * ha = adapter block pointer. 5230 * 5231 * Returns: 5232 * 0 = success. 5233 * 1 = error. 5234 * 2 = database was full and device was not configured. 
5235 */ 5236 static int 5237 qla2x00_configure_loop(scsi_qla_host_t *vha) 5238 { 5239 int rval; 5240 unsigned long flags, save_flags; 5241 struct qla_hw_data *ha = vha->hw; 5242 5243 rval = QLA_SUCCESS; 5244 5245 /* Get Initiator ID */ 5246 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 5247 rval = qla2x00_configure_hba(vha); 5248 if (rval != QLA_SUCCESS) { 5249 ql_dbg(ql_dbg_disc, vha, 0x2013, 5250 "Unable to configure HBA.\n"); 5251 return (rval); 5252 } 5253 } 5254 5255 save_flags = flags = vha->dpc_flags; 5256 ql_dbg(ql_dbg_disc, vha, 0x2014, 5257 "Configure loop -- dpc flags = 0x%lx.\n", flags); 5258 5259 /* 5260 * If we have both an RSCN and PORT UPDATE pending then handle them 5261 * both at the same time. 5262 */ 5263 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5264 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 5265 5266 qla2x00_get_data_rate(vha); 5267 qla_get_login_template(vha); 5268 5269 /* Determine what we need to do */ 5270 if ((ha->current_topology == ISP_CFG_FL || 5271 ha->current_topology == ISP_CFG_F) && 5272 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 5273 5274 set_bit(RSCN_UPDATE, &flags); 5275 clear_bit(LOCAL_LOOP_UPDATE, &flags); 5276 5277 } else if (ha->current_topology == ISP_CFG_NL || 5278 ha->current_topology == ISP_CFG_N) { 5279 clear_bit(RSCN_UPDATE, &flags); 5280 set_bit(LOCAL_LOOP_UPDATE, &flags); 5281 } else if (!vha->flags.online || 5282 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 5283 set_bit(RSCN_UPDATE, &flags); 5284 set_bit(LOCAL_LOOP_UPDATE, &flags); 5285 } 5286 5287 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 5288 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 5289 ql_dbg(ql_dbg_disc, vha, 0x2015, 5290 "Loop resync needed, failing.\n"); 5291 rval = QLA_FUNCTION_FAILED; 5292 } else 5293 rval = qla2x00_configure_local_loop(vha); 5294 } 5295 5296 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 5297 if (LOOP_TRANSITION(vha)) { 5298 ql_dbg(ql_dbg_disc, vha, 0x2099, 5299 "Needs RSCN update and loop transition.\n"); 5300 
rval = QLA_FUNCTION_FAILED; 5301 } 5302 else 5303 rval = qla2x00_configure_fabric(vha); 5304 } 5305 5306 if (rval == QLA_SUCCESS) { 5307 if (atomic_read(&vha->loop_down_timer) || 5308 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 5309 rval = QLA_FUNCTION_FAILED; 5310 } else { 5311 atomic_set(&vha->loop_state, LOOP_READY); 5312 ql_dbg(ql_dbg_disc, vha, 0x2069, 5313 "LOOP READY.\n"); 5314 ha->flags.fw_init_done = 1; 5315 5316 if (vha->hw->flags.edif_enabled && 5317 vha->e_dbell.db_flags != EDB_ACTIVE) { 5318 /* wake up authentication app to get ready */ 5319 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0); 5320 } 5321 5322 /* 5323 * Process any ATIO queue entries that came in 5324 * while we weren't online. 5325 */ 5326 if (qla_tgt_mode_enabled(vha) || 5327 qla_dual_mode_enabled(vha)) { 5328 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 5329 qlt_24xx_process_atio_queue(vha, 0); 5330 spin_unlock_irqrestore(&ha->tgt.atio_lock, 5331 flags); 5332 } 5333 } 5334 } 5335 5336 if (rval) { 5337 ql_dbg(ql_dbg_disc, vha, 0x206a, 5338 "%s *** FAILED ***.\n", __func__); 5339 } else { 5340 ql_dbg(ql_dbg_disc, vha, 0x206b, 5341 "%s: exiting normally. 
local port wwpn %8phN id %06x)\n", 5342 __func__, vha->port_name, vha->d_id.b24); 5343 } 5344 5345 /* Restore state if a resync event occurred during processing */ 5346 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 5347 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 5348 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5349 if (test_bit(RSCN_UPDATE, &save_flags)) { 5350 set_bit(RSCN_UPDATE, &vha->dpc_flags); 5351 } 5352 } 5353 5354 return (rval); 5355 } 5356 5357 static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) 5358 { 5359 unsigned long flags; 5360 fc_port_t *fcport; 5361 5362 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) 5363 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5364 5365 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5366 if (fcport->n2n_flag) { 5367 qla24xx_fcport_handle_login(vha, fcport); 5368 return QLA_SUCCESS; 5369 } 5370 } 5371 5372 spin_lock_irqsave(&vha->work_lock, flags); 5373 vha->scan.scan_retry++; 5374 spin_unlock_irqrestore(&vha->work_lock, flags); 5375 5376 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 5377 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5378 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5379 } 5380 return QLA_FUNCTION_FAILED; 5381 } 5382 5383 /* 5384 * qla2x00_configure_local_loop 5385 * Updates Fibre Channel Device Database with local loop devices. 5386 * 5387 * Input: 5388 * ha = adapter block pointer. 5389 * 5390 * Returns: 5391 * 0 = success. 5392 */ 5393 static int 5394 qla2x00_configure_local_loop(scsi_qla_host_t *vha) 5395 { 5396 int rval, rval2; 5397 int found_devs; 5398 int found; 5399 fc_port_t *fcport, *new_fcport; 5400 uint16_t index; 5401 uint16_t entries; 5402 struct gid_list_info *gid; 5403 uint16_t loop_id; 5404 uint8_t domain, area, al_pa; 5405 struct qla_hw_data *ha = vha->hw; 5406 unsigned long flags; 5407 5408 /* Inititae N2N login. 
	 */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	/* Empty list: schedule a bounded number of rescans. */
	if (entries == 0) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark every known port; ports not re-found below stay in SCAN state. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		/* ISP2100/2200 report an 8-bit loop ID in a different field. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter.
		 */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;


		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known port: refresh its addressing in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport.
			 */
			fcport = new_fcport;

			/* Drop the lock: the allocation may sleep (GFP_KERNEL). */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Second pass: retire ports that disappeared, log in ports found. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	/* Free the unused spare allocated above. */
	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}

/*
 * qla2x00_iidma_fcport
 *	Adjust the firmware's iIDMA (port speed) setting for @fcport to match
 *	its reported fp_speed.  Best effort: silently skipped when the HBA is
 *	not iIDMA-capable or the speed is unknown/unusable.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if
(atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	/* Never advertise a speed the link itself cannot sustain. */
	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
	}
}

/* Apply iIDMA speed and FCP priority settings to @fcport. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}

/*
 * Queue a QLA_EVT_IIDMA work item for @fcport.
 * Returns QLA_FUNCTION_FAILED if no work element could be allocated.
 */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Nothing to do if the port is already ONLINE. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Link the transport rport back to our fcport under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate the driver's port type into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
}

/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0 - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Well-known / switch-reserved addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/*
	 * NOTE(review): the outage was shorter than dev_loss_tmo, so it is
	 * counted as a short link-down; tgt_link_down_time is then reset to
	 * QLA2XX_MAX_LINK_DOWN_TIME -- confirm this sentinel means "no
	 * outage in progress".
	 */
	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	/* NVMe targets are registered with the NVMe transport instead. */
	if (NVME_TARGET(vha->hw, fcport)) {
		qla_nvme_register_remote(vha, fcport);
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the FC transport and/or target core per host mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}

/* Work-queue wrapper: register @fcport, then re-drive discovery if RSCNs
 * arrived while the registration was in flight. */
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	u32 rscn_gen = fcport->rscn_gen;
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}

/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 * BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch: nothing fabric-side to configure. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	/* Target/dual mode needs RSCNs delivered to the driver. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Register FC-4 types/features and names with the switch. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen.
		 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			/* Mark everything stale; the scan re-marks survivors. */
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
 */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/* Bulk name-server queries; any failure falls back to GA_NXT
		 * (swl = NULL) unless a loop resync preempts us. */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			/* Consume the next entry from the GID_PT snapshot. */
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found && NVME_TARGET(vha->hw, fcport)) {
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Free the spare fcport that was never consumed by the loop. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}

/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
	int loop_id = FC_NO_LOOP_ID;
	int lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* The physical port always uses the well-known handle. */
	if (vha->vp_idx == 0) {
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;
	}

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);
			loop_id = lid;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* FC_NO_LOOP_ID if the map was exhausted. */
	return loop_id;
}

/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry the login until firmware gives a terminal mailbox status. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID. The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bit 0: initiator; bit 1: FCP-2 capability. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *	0 - Login successfully
 *	1 - Login failed
 *	3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				wait_time--;
			/* Loop again only while resync is still being
			 * requested and nothing has aborted the ISP. */
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */

int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync may be active at a time. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/* Walk every vport and delete any deferred (drport) rport references. */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha, *tvp;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references.
 */
	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
		/* Hold a vref so the vport cannot go away while unlocked. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/* rport deletion may sleep; drop the lock. */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* dev_part_info1 carries one class-type nibble per function 0-7. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* dev_part_info2 describes functions 8-15 the same way. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
 * However consider only valid physical fcoe function numbers (0-15).
 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}

/* Set this function's bit in the IDC driver-ack register (idc_lock held). */
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	/* Read-modify-write; safe only because idc_lock serializes access. */
	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Clear this function's bit in the IDC driver-ack register (idc_lock held). */
static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Map an IDC device-state register value to a human-readable name. */
static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA8XXX_DEV_COLD:
		return "COLD/RE-INIT";
	case QLA8XXX_DEV_INITIALIZING:
		return "INITIALIZING";
	case QLA8XXX_DEV_READY:
		return "READY";
	case QLA8XXX_DEV_NEED_RESET:
		return "NEED RESET";
	case QLA8XXX_DEV_NEED_QUIESCENT:
		return "NEED QUIESCENT";
	case QLA8XXX_DEV_FAILED:
		return "FAILED";
	case QLA8XXX_DEV_QUIESCENT:
		return "QUIESCENT";
	default:
		return "Unknown";
	}
}

/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	/* Audit register layout (per the shifts below): bits 0-6 = port
	 * number, bit 7 = audit type, bits 8+ = timestamp or duration
	 * in seconds. */
	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}

/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);

		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/* Drop the lock while polling so the reset owner
			 * can advance the state machine. */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}

/* Write the IDC control register (idc_lock held by caller). */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

/* Read the IDC control register (idc_lock held by caller). */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

/* Check whether this function's bit is still set in the IDC
 * drv-presence register, i.e. whether we still participate in IDC. */
static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
	uint32_t drv_presence = 0;
	struct qla_hw_data *ha = vha->hw;

	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (drv_presence & (1 << ha->portnum))
		return QLA_SUCCESS;
	else
		return QLA_TEST_FAILED;
}

/*
 * Perform (or wait out) a NIC core reset via the IDC protocol:
 * take the idc lock, verify we still participate, determine reset
 * ownership, request the reset, then run the IDC state handler if
 * the request was accepted.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered  %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}

/*
 * Capture an MCTP dump into a lazily allocated DMA-coherent buffer.
 * The buffer is kept across calls (presumably freed at teardown
 * elsewhere -- not visible here).
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR 0x00000000
	/* Dump length is in 32-bit words, hence the /4. */
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only port 0 restarts the NIC core firmware, and only when no
	 * other reset handler is already active. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}

/*
 * qla2x00_quiesce_io
 * Description: This function will block the new I/Os
 *              Its not aborting any I/Os as context
 *              is not destroyed during quiescence
 * Arguments: scsi_qla_host_t
 * return : void
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		/* Walk all vports; the ref-count inc plus lock drop lets
		 * mark_all_devices_lost() sleep while keeping vp alive. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
	    != QLA_SUCCESS);
}

void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* Tell the mailbox path to bail out while we tear down. */
	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
	/* Bump the reset generation and propagate it to every queue pair
	 * so stale requests can be recognized after the reset. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
			ha->queue_pair_map[i]->cmd_cnt =
				ha->queue_pair_map[i]->cmd_completion_cnt = 0;
		}
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Poll (up to ~1s: 50 * 20ms) for pending mailbox commands to
	 * drain before clearing purge_mbox. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		/* vref_count keeps each vport alive across the lock drop. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* Make sure for ISP 82XX IO DMA is complete */
	if (IS_P3P_TYPE(ha)) {
		qla82xx_chip_reset_cleanup(vha);
		ql_log(ql_log_info, vha, 0x00b4,
		    "Done chip reset cleanup.\n");

		/* Done waiting for pending commands. Reset online flag */
		vha->flags.online = 0;
	}

	/* Requeue all commands in outstanding command list. */
	qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	/* memory barrier */
	wmb();
}

/*
 * qla2x00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
7089 * 7090 * Returns: 7091 * 0 = success 7092 */ 7093 int 7094 qla2x00_abort_isp(scsi_qla_host_t *vha) 7095 { 7096 int rval; 7097 uint8_t status = 0; 7098 struct qla_hw_data *ha = vha->hw; 7099 struct scsi_qla_host *vp, *tvp; 7100 struct req_que *req = ha->req_q_map[0]; 7101 unsigned long flags; 7102 7103 if (vha->flags.online) { 7104 qla2x00_abort_isp_cleanup(vha); 7105 7106 if (vha->hw->flags.port_isolated) 7107 return status; 7108 7109 if (qla2x00_isp_reg_stat(ha)) { 7110 ql_log(ql_log_info, vha, 0x803f, 7111 "ISP Abort - ISP reg disconnect, exiting.\n"); 7112 return status; 7113 } 7114 7115 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { 7116 ha->flags.chip_reset_done = 1; 7117 vha->flags.online = 1; 7118 status = 0; 7119 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7120 return status; 7121 } 7122 7123 if (IS_QLA8031(ha)) { 7124 ql_dbg(ql_dbg_p3p, vha, 0xb05c, 7125 "Clearing fcoe driver presence.\n"); 7126 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) 7127 ql_dbg(ql_dbg_p3p, vha, 0xb073, 7128 "Error while clearing DRV-Presence.\n"); 7129 } 7130 7131 if (unlikely(pci_channel_offline(ha->pdev) && 7132 ha->flags.pci_channel_io_perm_failure)) { 7133 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7134 status = 0; 7135 return status; 7136 } 7137 7138 switch (vha->qlini_mode) { 7139 case QLA2XXX_INI_MODE_DISABLED: 7140 if (!qla_tgt_mode_enabled(vha)) 7141 return 0; 7142 break; 7143 case QLA2XXX_INI_MODE_DUAL: 7144 if (!qla_dual_mode_enabled(vha)) 7145 return 0; 7146 break; 7147 case QLA2XXX_INI_MODE_ENABLED: 7148 default: 7149 break; 7150 } 7151 7152 ha->isp_ops->get_flash_version(vha, req->ring); 7153 7154 if (qla2x00_isp_reg_stat(ha)) { 7155 ql_log(ql_log_info, vha, 0x803f, 7156 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); 7157 return status; 7158 } 7159 ha->isp_ops->nvram_config(vha); 7160 7161 if (qla2x00_isp_reg_stat(ha)) { 7162 ql_log(ql_log_info, vha, 0x803f, 7163 "ISP Abort - ISP reg disconnect post nvmram config, 
exiting.\n"); 7164 return status; 7165 } 7166 if (!qla2x00_restart_isp(vha)) { 7167 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7168 7169 if (!atomic_read(&vha->loop_down_timer)) { 7170 /* 7171 * Issue marker command only when we are going 7172 * to start the I/O . 7173 */ 7174 vha->marker_needed = 1; 7175 } 7176 7177 vha->flags.online = 1; 7178 7179 ha->isp_ops->enable_intrs(ha); 7180 7181 ha->isp_abort_cnt = 0; 7182 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7183 7184 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) 7185 qla2x00_get_fw_version(vha); 7186 if (ha->fce) { 7187 ha->flags.fce_enabled = 1; 7188 memset(ha->fce, 0, 7189 fce_calc_size(ha->fce_bufs)); 7190 rval = qla2x00_enable_fce_trace(vha, 7191 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 7192 &ha->fce_bufs); 7193 if (rval) { 7194 ql_log(ql_log_warn, vha, 0x8033, 7195 "Unable to reinitialize FCE " 7196 "(%d).\n", rval); 7197 ha->flags.fce_enabled = 0; 7198 } 7199 } 7200 7201 if (ha->eft) { 7202 memset(ha->eft, 0, EFT_SIZE); 7203 rval = qla2x00_enable_eft_trace(vha, 7204 ha->eft_dma, EFT_NUM_BUFFERS); 7205 if (rval) { 7206 ql_log(ql_log_warn, vha, 0x8034, 7207 "Unable to reinitialize EFT " 7208 "(%d).\n", rval); 7209 } 7210 } 7211 } else { /* failed the ISP abort */ 7212 vha->flags.online = 1; 7213 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 7214 if (ha->isp_abort_cnt == 0) { 7215 ql_log(ql_log_fatal, vha, 0x8035, 7216 "ISP error recover failed - " 7217 "board disabled.\n"); 7218 /* 7219 * The next call disables the board 7220 * completely. 
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: arm the retry counter. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (vha->hw->flags.port_isolated) {
		qla2x00_abort_isp_cleanup(vha);
		return status;
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every virtual port; vref_count
		 * keeps vp alive while the lock is dropped. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}

/*
 * qla2x00_restart_isp
 *	restarts the ISP after a reset
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status;
	struct qla_hw_data *ha = vha->hw;

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (status)
			return status;
		status = qla2x00_setup_chip(vha);
		if (status)
			return status;
	}

	status = qla2x00_init_rings(vha);
	if (status)
		return status;

	clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
	ha->flags.chip_reset_done = 1;

	/* Initialize the queues in use */
	qla25xx_init_queues(ha);

	status = qla2x00_fw_ready(vha);
	if (status) {
		/* if no cable then assume it's good */
		return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
	}

	/* Issue a marker after FW becomes ready. */
	qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return 0;
}

/*
 * Re-initialize every mapped request/response queue beyond queue 0.
 * Returns the status of the last queue initialized (-1 when no
 * queues were touched).
 */
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
	struct rsp_que *rsp = NULL;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int ret = -1;
	int i;

	for (i = 1; i < ha->max_rsp_queues; i++) {
		rsp = ha->rsp_q_map[i];
		if (rsp && test_bit(i, ha->rsp_qid_map)) {
			rsp->options &= ~BIT_0;
			ret = qla25xx_init_rsp_que(base_vha, rsp);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
				    "%s Rsp que: %d init failed.\n",
				    __func__, rsp->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0100,
				    "%s Rsp que: %d inited.\n",
				    __func__, rsp->id);
		}
	}
	for (i = 1; i < ha->max_req_queues; i++) {
		req = ha->req_q_map[i];
		if (req && test_bit(i, ha->req_qid_map)) {
			/* Clear outstanding commands array. */
			req->options &= ~BIT_0;
			ret = qla25xx_init_req_que(base_vha, req);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x0101,
				    "%s Req que: %d init failed.\n",
				    __func__, req->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0102,
				    "%s Req que: %d inited.\n",
				    __func__, req->id);
		}
	}
	return ret;
}

/*
 * qla2x00_reset_adapter
 *	Reset adapter.
 *
 * Input:
 *	ha = adapter block pointer.
 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset then release the RISC; each write is followed by a read
	 * to flush PCI posting before the next step. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	rd_reg_word(&reg->hccr);		/* PCI Posting. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/* 24xx-family variant of the adapter reset; no-op on P3P parts. */
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);
	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return QLA_SUCCESS;
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * Read and validate 24xx NVRAM, fall back to built-in defaults on a
 * bad checksum/id/version, and build the init control block plus the
 * driver's cached HBA parameters from it.  Returns QLA_SUCCESS, or 1
 * when defaults had to be used.
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int   rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	__le32 *dptr;
	uint8_t  *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (__force __le32 *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	/* A valid image sums to zero over all 32-bit words. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		/* Default (invalid-for-fabric) WWPN/WWNN, port-unique via
		 * port_no in byte 1. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		/* On sparc, prefer WWNs from OpenFirmware properties. */
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	/* Byte-wise copy relies on nv and icb sharing field layout over
	 * these ranges. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	/* Module parameter overrides NVRAM-derived value. */
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		/* ZIO mode lives in the low 4 bits of firmware_options_2. */
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		/* zio_timer is in 100us units, hence the * 100 in the log. */
		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/* Log the fields of a flash image-status block for debugging. */
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}

/* Returns true (nonzero) when the 28xx AUX image signature is INVALID. */
static bool
qla28xx_check_aux_image_status_signature(
    struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
}

/* Returns true (nonzero) when the 27xx/28xx image signature is INVALID. */
static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return
	    signature != QLA27XX_IMG_STATUS_SIGN &&
	    signature != QLA28XX_IMG_STATUS_SIGN;
}

/* Sum all 32-bit words of the status block; a valid block sums to 0. */
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
	__le32 *p = (__force __le32 *)image_status;
	uint n = sizeof(*image_status) / sizeof(*p);
	uint32_t sum = 0;

	for ( ; n--; p++)
		sum += le32_to_cpup(p);

	return sum;
}

/* Select primary/secondary image for one component from the aux bitmap. */
static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
{
	return aux->bitmap & bitmask ?
	    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
}

/* Decode the aux image bitmap into per-component active regions. */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}

/* >= 0 means the primary image's generation is the same or newer. */
static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* calculate generation delta as uint16 (this accounts for wrap) */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}

/*
 * Determine which auxiliary flash image (primary/secondary) is valid
 * and active for each component, preferring the newer generation when
 * both are valid and active.
 */
void
qla28xx_get_aux_images(
	struct scsi_qla_host *vha, struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_aux_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
	    ha->flt_region_aux_img_status_pri,
	    sizeof(pri_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary aux image signature (%#x) not valid\n",
		    le32_to_cpu(pri_aux_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary aux image checksum failed\n");
		goto check_sec_image;
	}

	valid_pri_image = true;

	if (pri_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Primary aux image is active\n");
		active_pri_image = true;
	}

check_sec_image:
	if (!ha->flt_region_aux_img_status_sec) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Secondary aux image not addressed\n");
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
	    ha->flt_region_aux_img_status_sec,
	    sizeof(sec_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Secondary aux image signature (%#x) not valid\n",
		    le32_to_cpu(sec_aux_image_status.signature));
		goto check_valid_image;
	}

	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Secondary aux image checksum failed\n");
		goto check_valid_image;
	}

	valid_sec_image = true;

	if (sec_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary aux image is active\n");
		active_sec_image = true;
	}

check_valid_image:
	if (valid_pri_image && active_pri_image &&
	    valid_sec_image && active_sec_image) {
		/* Both usable: pick the newer generation (ties go to
		 * primary). */
		if (qla27xx_compare_image_generation(&pri_aux_image_status,
		    &sec_aux_image_status) >= 0) {
			qla28xx_component_status(active_regions,
			    &pri_aux_image_status);
		} else {
			qla28xx_component_status(active_regions,
			    &sec_aux_image_status);
		}
	} else if (valid_pri_image && active_pri_image) {
		qla28xx_component_status(active_regions, &pri_aux_image_status);
	} else if (valid_sec_image && active_sec_image) {
		qla28xx_component_status(active_regions, &sec_aux_image_status);
	}

	ql_dbg(ql_dbg_init, vha, 0x018f,
	    "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
	    active_regions->aux.board_config,
	    active_regions->aux.vpd_nvram,
	    active_regions->aux.npiv_config_0_1,
	    active_regions->aux.npiv_config_2_3);
}

void
qla27xx_get_active_image(struct scsi_qla_host *vha,
    struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_image_status, sec_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
		goto check_sec_image;
	}

	if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
	    QLA_SUCCESS) {
		WARN_ON_ONCE(true);
		goto check_sec_image;
	}
	qla27xx_print_image(vha, "Primary image", &pri_image_status);

	if (qla27xx_check_image_status_signature(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (%#x) not valid\n",
		    le32_to_cpu(pri_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary image checksum failed\n");
		goto
check_sec_image; 7930 } 7931 7932 valid_pri_image = true; 7933 7934 if (pri_image_status.image_status_mask & 1) { 7935 ql_dbg(ql_dbg_init, vha, 0x018d, 7936 "Primary image is active\n"); 7937 active_pri_image = true; 7938 } 7939 7940 check_sec_image: 7941 if (!ha->flt_region_img_status_sec) { 7942 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); 7943 goto check_valid_image; 7944 } 7945 7946 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 7947 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); 7948 qla27xx_print_image(vha, "Secondary image", &sec_image_status); 7949 7950 if (qla27xx_check_image_status_signature(&sec_image_status)) { 7951 ql_dbg(ql_dbg_init, vha, 0x018b, 7952 "Secondary image signature (%#x) not valid\n", 7953 le32_to_cpu(sec_image_status.signature)); 7954 goto check_valid_image; 7955 } 7956 7957 if (qla27xx_image_status_checksum(&sec_image_status)) { 7958 ql_dbg(ql_dbg_init, vha, 0x018c, 7959 "Secondary image checksum failed\n"); 7960 goto check_valid_image; 7961 } 7962 7963 valid_sec_image = true; 7964 7965 if (sec_image_status.image_status_mask & 1) { 7966 ql_dbg(ql_dbg_init, vha, 0x018d, 7967 "Secondary image is active\n"); 7968 active_sec_image = true; 7969 } 7970 7971 check_valid_image: 7972 if (valid_pri_image && active_pri_image) 7973 active_regions->global = QLA27XX_PRIMARY_IMAGE; 7974 7975 if (valid_sec_image && active_sec_image) { 7976 if (!active_regions->global || 7977 qla27xx_compare_image_generation( 7978 &pri_image_status, &sec_image_status) < 0) { 7979 active_regions->global = QLA27XX_SECONDARY_IMAGE; 7980 } 7981 } 7982 7983 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", 7984 active_regions->global == QLA27XX_DEFAULT_IMAGE ? 7985 "default (boot/fw)" : 7986 active_regions->global == QLA27XX_PRIMARY_IMAGE ? 7987 "primary" : 7988 active_regions->global == QLA27XX_SECONDARY_IMAGE ? 
7989 "secondary" : "invalid", 7990 active_regions->global); 7991 } 7992 7993 bool qla24xx_risc_firmware_invalid(uint32_t *dword) 7994 { 7995 return 7996 !(dword[4] | dword[5] | dword[6] | dword[7]) || 7997 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); 7998 } 7999 8000 static int 8001 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 8002 uint32_t faddr) 8003 { 8004 int rval; 8005 uint templates, segments, fragment; 8006 ulong i; 8007 uint j; 8008 ulong dlen; 8009 uint32_t *dcode; 8010 uint32_t risc_addr, risc_size, risc_attr = 0; 8011 struct qla_hw_data *ha = vha->hw; 8012 struct req_que *req = ha->req_q_map[0]; 8013 struct fwdt *fwdt = ha->fwdt; 8014 8015 ql_dbg(ql_dbg_init, vha, 0x008b, 8016 "FW: Loading firmware from flash (%x).\n", faddr); 8017 8018 dcode = (uint32_t *)req->ring; 8019 qla24xx_read_flash_data(vha, dcode, faddr, 8); 8020 if (qla24xx_risc_firmware_invalid(dcode)) { 8021 ql_log(ql_log_fatal, vha, 0x008c, 8022 "Unable to verify the integrity of flash firmware " 8023 "image.\n"); 8024 ql_log(ql_log_fatal, vha, 0x008d, 8025 "Firmware data: %08x %08x %08x %08x.\n", 8026 dcode[0], dcode[1], dcode[2], dcode[3]); 8027 8028 return QLA_FUNCTION_FAILED; 8029 } 8030 8031 dcode = (uint32_t *)req->ring; 8032 *srisc_addr = 0; 8033 segments = FA_RISC_CODE_SEGMENTS; 8034 for (j = 0; j < segments; j++) { 8035 ql_dbg(ql_dbg_init, vha, 0x008d, 8036 "-> Loading segment %u...\n", j); 8037 qla24xx_read_flash_data(vha, dcode, faddr, 10); 8038 risc_addr = be32_to_cpu((__force __be32)dcode[2]); 8039 risc_size = be32_to_cpu((__force __be32)dcode[3]); 8040 if (!*srisc_addr) { 8041 *srisc_addr = risc_addr; 8042 risc_attr = be32_to_cpu((__force __be32)dcode[9]); 8043 } 8044 8045 dlen = ha->fw_transfer_size >> 2; 8046 for (fragment = 0; risc_size; fragment++) { 8047 if (dlen > risc_size) 8048 dlen = risc_size; 8049 8050 ql_dbg(ql_dbg_init, vha, 0x008e, 8051 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", 8052 fragment, risc_addr, faddr, dlen); 
			/* Read a fragment into the request ring, fix byte order,
			 * then DMA-load it into RISC memory. */
			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* Firmware-dump templates only exist on 27xx/28xx parts. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		/* Drop any previously loaded template before reloading. */
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		dcode = (uint32_t *)req->ring;
		qla24xx_read_flash_data(vha, dcode, faddr, 7);
		risc_size = be32_to_cpu((__force __be32)dcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0161,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, faddr, risc_size);
		/* all-zero or all-ones size means the array is unreadable */
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0162,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		faddr += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0163,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0164,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0165,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0166,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template load is best-effort: discard it but report success. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/*
 * Load ISP2x00 (FWI1) firmware via the request-firmware interface and
 * transfer it to RISC memory in 16-bit words.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode;
	__be16 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version.
	 */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version words mean a corrupt image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}

/*
 * Load ISP24xx+ firmware from a request-firmware blob: verify the header,
 * DMA-load each RISC code segment, then (27xx/28xx) capture the
 * firmware-dump templates that follow the code segments.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong i;
	uint j;
	struct fw_blob *blob;
	__be32 *fwcode;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");

		return QLA_FUNCTION_FAILED;
	}

	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			/* First segment: remember start address and attributes. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* Firmware-dump templates only exist on 27xx/28xx parts. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template load is best-effort: discard it but report success. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

/* 24xx firmware load entry point; ql2xfwloadbin==1 forces the 81xx path. */
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	if (ql2xfwloadbin == 1)
		return qla81xx_load_risc(vha, srisc_addr);

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr,
	    vha->hw->flt_region_fw);
}

int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	/* Only 27xx/28xx have a secondary flash firmware region. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	/* Golden firmware loaded: flag limited operation mode. */
	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}

/*
 * Politely stop running firmware; retried up to 5 times with a chip
 * reset + re-setup between attempts. Skipped entirely when the PCI
 * channel is dead, the chip is not FWI2-capable, or firmware was
 * never started.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}

/*
 * Bring a virtual port (NPIV vha) online: wait for firmware readiness on
 * the base port, log into the SNS, then trigger a loop resync.
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED, or -EINVAL for the base port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}

/* 84XX Support **************************************************************/

/* Global registry of 84xx chip-state objects, shared across functions
 * on the same PCI bus and protected by qla_cs84xx_mutex. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

/*
 * Get (or allocate) the refcounted chip-state object for the 84xx chip
 * on this host's PCI bus. Returns NULL on allocation failure.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip.
	 */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			/* Existing entry for this bus: share it. */
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

/* kref release callback: unlink the chip-state object and free it. */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

/* Drop this host's reference on the shared 84xx chip state, if any. */
void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

/*
 * Verify the 84xx chip under the shared firmware-update mutex.
 * Returns QLA_SUCCESS only if verification succeeded and status[0] is 0.
 */
static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ?
QLA_FUNCTION_FAILED : 8623 QLA_SUCCESS; 8624 } 8625 8626 /* 81XX Support **************************************************************/ 8627 8628 int 8629 qla81xx_nvram_config(scsi_qla_host_t *vha) 8630 { 8631 int rval; 8632 struct init_cb_81xx *icb; 8633 struct nvram_81xx *nv; 8634 __le32 *dptr; 8635 uint8_t *dptr1, *dptr2; 8636 uint32_t chksum; 8637 uint16_t cnt; 8638 struct qla_hw_data *ha = vha->hw; 8639 uint32_t faddr; 8640 struct active_regions active_regions = { }; 8641 8642 rval = QLA_SUCCESS; 8643 icb = (struct init_cb_81xx *)ha->init_cb; 8644 nv = ha->nvram; 8645 8646 /* Determine NVRAM starting address. */ 8647 ha->nvram_size = sizeof(*nv); 8648 ha->vpd_size = FA_NVRAM_VPD_SIZE; 8649 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 8650 ha->vpd_size = FA_VPD_SIZE_82XX; 8651 8652 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) 8653 qla28xx_get_aux_images(vha, &active_regions); 8654 8655 /* Get VPD data into cache */ 8656 ha->vpd = ha->nvram + VPD_OFFSET; 8657 8658 faddr = ha->flt_region_vpd; 8659 if (IS_QLA28XX(ha)) { 8660 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 8661 faddr = ha->flt_region_vpd_sec; 8662 ql_dbg(ql_dbg_init, vha, 0x0110, 8663 "Loading %s nvram image.\n", 8664 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 8665 "primary" : "secondary"); 8666 } 8667 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); 8668 8669 /* Get NVRAM data into cache and calculate checksum. */ 8670 faddr = ha->flt_region_nvram; 8671 if (IS_QLA28XX(ha)) { 8672 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 8673 faddr = ha->flt_region_nvram_sec; 8674 } 8675 ql_dbg(ql_dbg_init, vha, 0x0110, 8676 "Loading %s nvram image.\n", 8677 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 
8678 "primary" : "secondary"); 8679 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); 8680 8681 dptr = (__force __le32 *)nv; 8682 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 8683 chksum += le32_to_cpu(*dptr); 8684 8685 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, 8686 "Contents of NVRAM:\n"); 8687 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, 8688 nv, ha->nvram_size); 8689 8690 /* Bad NVRAM data, set defaults parameters. */ 8691 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || 8692 le16_to_cpu(nv->nvram_version) < ICB_VERSION) { 8693 /* Reset NVRAM data. */ 8694 ql_log(ql_log_info, vha, 0x0073, 8695 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", 8696 chksum, nv->id, le16_to_cpu(nv->nvram_version)); 8697 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); 8698 ql_log(ql_log_info, vha, 0x0074, 8699 "Falling back to functioning (yet invalid -- WWPN) " 8700 "defaults.\n"); 8701 8702 /* 8703 * Set default initialization control block. 
8704 */ 8705 memset(nv, 0, ha->nvram_size); 8706 nv->nvram_version = cpu_to_le16(ICB_VERSION); 8707 nv->version = cpu_to_le16(ICB_VERSION); 8708 nv->frame_payload_size = cpu_to_le16(2048); 8709 nv->execution_throttle = cpu_to_le16(0xFFFF); 8710 nv->exchange_count = cpu_to_le16(0); 8711 nv->port_name[0] = 0x21; 8712 nv->port_name[1] = 0x00 + ha->port_no + 1; 8713 nv->port_name[2] = 0x00; 8714 nv->port_name[3] = 0xe0; 8715 nv->port_name[4] = 0x8b; 8716 nv->port_name[5] = 0x1c; 8717 nv->port_name[6] = 0x55; 8718 nv->port_name[7] = 0x86; 8719 nv->node_name[0] = 0x20; 8720 nv->node_name[1] = 0x00; 8721 nv->node_name[2] = 0x00; 8722 nv->node_name[3] = 0xe0; 8723 nv->node_name[4] = 0x8b; 8724 nv->node_name[5] = 0x1c; 8725 nv->node_name[6] = 0x55; 8726 nv->node_name[7] = 0x86; 8727 nv->login_retry_count = cpu_to_le16(8); 8728 nv->interrupt_delay_timer = cpu_to_le16(0); 8729 nv->login_timeout = cpu_to_le16(0); 8730 nv->firmware_options_1 = 8731 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 8732 nv->firmware_options_2 = cpu_to_le32(2 << 4); 8733 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 8734 nv->firmware_options_3 = cpu_to_le32(2 << 13); 8735 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 8736 nv->efi_parameters = cpu_to_le32(0); 8737 nv->reset_delay = 5; 8738 nv->max_luns_per_target = cpu_to_le16(128); 8739 nv->port_down_retry_count = cpu_to_le16(30); 8740 nv->link_down_timeout = cpu_to_le16(180); 8741 nv->enode_mac[0] = 0x00; 8742 nv->enode_mac[1] = 0xC0; 8743 nv->enode_mac[2] = 0xDD; 8744 nv->enode_mac[3] = 0x04; 8745 nv->enode_mac[4] = 0x05; 8746 nv->enode_mac[5] = 0x06 + ha->port_no + 1; 8747 8748 rval = 1; 8749 } 8750 8751 if (IS_T10_PI_CAPABLE(ha)) 8752 nv->frame_payload_size &= cpu_to_le16(~7); 8753 8754 qlt_81xx_config_nvram_stage1(vha, nv); 8755 8756 /* Reset Initialization control block */ 8757 memset(icb, 0, ha->init_cb_size); 8758 8759 /* Copy 1st segment. 
*/ 8760 dptr1 = (uint8_t *)icb; 8761 dptr2 = (uint8_t *)&nv->version; 8762 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 8763 while (cnt--) 8764 *dptr1++ = *dptr2++; 8765 8766 icb->login_retry_count = nv->login_retry_count; 8767 8768 /* Copy 2nd segment. */ 8769 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 8770 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 8771 cnt = (uint8_t *)&icb->reserved_5 - 8772 (uint8_t *)&icb->interrupt_delay_timer; 8773 while (cnt--) 8774 *dptr1++ = *dptr2++; 8775 8776 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 8777 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 8778 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 8779 icb->enode_mac[0] = 0x00; 8780 icb->enode_mac[1] = 0xC0; 8781 icb->enode_mac[2] = 0xDD; 8782 icb->enode_mac[3] = 0x04; 8783 icb->enode_mac[4] = 0x05; 8784 icb->enode_mac[5] = 0x06 + ha->port_no + 1; 8785 } 8786 8787 /* Use extended-initialization control block. */ 8788 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 8789 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); 8790 /* 8791 * Setup driver NVRAM options. 8792 */ 8793 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 8794 "QLE8XXX"); 8795 8796 qlt_81xx_config_nvram_stage2(vha, icb); 8797 8798 /* Use alternate WWN? */ 8799 if (nv->host_p & cpu_to_le32(BIT_15)) { 8800 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 8801 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 8802 } 8803 8804 /* Prepare nodename */ 8805 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 8806 /* 8807 * Firmware will apply the following mask if the nodename was 8808 * not provided. 
8809 */ 8810 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 8811 icb->node_name[0] &= 0xF0; 8812 } 8813 8814 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { 8815 if ((nv->enhanced_features & BIT_7) == 0) 8816 ha->flags.scm_supported_a = 1; 8817 } 8818 8819 /* Set host adapter parameters. */ 8820 ha->flags.disable_risc_code_load = 0; 8821 ha->flags.enable_lip_reset = 0; 8822 ha->flags.enable_lip_full_login = 8823 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; 8824 ha->flags.enable_target_reset = 8825 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; 8826 ha->flags.enable_led_scheme = 0; 8827 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; 8828 8829 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 8830 (BIT_6 | BIT_5 | BIT_4)) >> 4; 8831 8832 /* save HBA serial number */ 8833 ha->serial0 = icb->port_name[5]; 8834 ha->serial1 = icb->port_name[6]; 8835 ha->serial2 = icb->port_name[7]; 8836 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 8837 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 8838 8839 icb->execution_throttle = cpu_to_le16(0xFFFF); 8840 8841 ha->retry_count = le16_to_cpu(nv->login_retry_count); 8842 8843 /* Set minimum login_timeout to 4 seconds. */ 8844 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 8845 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 8846 if (le16_to_cpu(nv->login_timeout) < 4) 8847 nv->login_timeout = cpu_to_le16(4); 8848 ha->login_timeout = le16_to_cpu(nv->login_timeout); 8849 8850 /* Set minimum RATOV to 100 tenths of a second. */ 8851 ha->r_a_tov = 100; 8852 8853 ha->loop_reset_delay = nv->reset_delay; 8854 8855 /* Link Down Timeout = 0: 8856 * 8857 * When Port Down timer expires we will start returning 8858 * I/O's to OS with "DID_NO_CONNECT". 8859 * 8860 * Link Down Timeout != 0: 8861 * 8862 * The driver waits for the link to come up after link down 8863 * before returning I/Os to OS with "DID_NO_CONNECT". 
8864 */ 8865 if (le16_to_cpu(nv->link_down_timeout) == 0) { 8866 ha->loop_down_abort_time = 8867 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 8868 } else { 8869 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 8870 ha->loop_down_abort_time = 8871 (LOOP_DOWN_TIME - ha->link_down_timeout); 8872 } 8873 8874 /* Need enough time to try and get the port back. */ 8875 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 8876 if (qlport_down_retry) 8877 ha->port_down_retry_count = qlport_down_retry; 8878 8879 /* Set login_retry_count */ 8880 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 8881 if (ha->port_down_retry_count == 8882 le16_to_cpu(nv->port_down_retry_count) && 8883 ha->port_down_retry_count > 3) 8884 ha->login_retry_count = ha->port_down_retry_count; 8885 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 8886 ha->login_retry_count = ha->port_down_retry_count; 8887 if (ql2xloginretrycount) 8888 ha->login_retry_count = ql2xloginretrycount; 8889 8890 /* if not running MSI-X we need handshaking on interrupts */ 8891 if (!vha->hw->flags.msix_enabled && 8892 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) 8893 icb->firmware_options_2 |= cpu_to_le32(BIT_22); 8894 8895 /* Enable ZIO. */ 8896 if (!vha->flags.init_done) { 8897 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 8898 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 8899 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	/* Clear the ZIO mode bits; they are re-applied below if still enabled. */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		/* Any non-disabled NVRAM setting is promoted to mode 6. */
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla82xx_restart_isp
 *	Bring an ISP82xx adapter back online after a reset:
 *	re-initialize the rings, wait for firmware readiness, re-enable
 *	interrupts and the FCE/EFT trace buffers, then run ISP abort
 *	recovery on every virtual port.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Return:
 *	0 on success, non-zero on failure.
 *
 * Context:
 *	Kernel context.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm FCE tracing with a zeroed buffer, if allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				/* Trace stays off; not fatal to the restart. */
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm EFT tracing with a zeroed buffer, if allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/*
				 * Pin vp with a reference so it cannot go
				 * away while vport_slock is dropped across
				 * qla2x00_vp_abort_isp() below.
				 */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}

/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
9043 * 9044 * Return: 9045 * non-zero (if found) 9046 * -1 (if not found) 9047 * 9048 * Context: 9049 * Kernel context 9050 */ 9051 static int 9052 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 9053 { 9054 int i, entries; 9055 uint8_t pid_match, wwn_match; 9056 int priority; 9057 uint32_t pid1, pid2; 9058 uint64_t wwn1, wwn2; 9059 struct qla_fcp_prio_entry *pri_entry; 9060 struct qla_hw_data *ha = vha->hw; 9061 9062 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) 9063 return -1; 9064 9065 priority = -1; 9066 entries = ha->fcp_prio_cfg->num_entries; 9067 pri_entry = &ha->fcp_prio_cfg->entry[0]; 9068 9069 for (i = 0; i < entries; i++) { 9070 pid_match = wwn_match = 0; 9071 9072 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { 9073 pri_entry++; 9074 continue; 9075 } 9076 9077 /* check source pid for a match */ 9078 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { 9079 pid1 = pri_entry->src_pid & INVALID_PORT_ID; 9080 pid2 = vha->d_id.b24 & INVALID_PORT_ID; 9081 if (pid1 == INVALID_PORT_ID) 9082 pid_match++; 9083 else if (pid1 == pid2) 9084 pid_match++; 9085 } 9086 9087 /* check destination pid for a match */ 9088 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { 9089 pid1 = pri_entry->dst_pid & INVALID_PORT_ID; 9090 pid2 = fcport->d_id.b24 & INVALID_PORT_ID; 9091 if (pid1 == INVALID_PORT_ID) 9092 pid_match++; 9093 else if (pid1 == pid2) 9094 pid_match++; 9095 } 9096 9097 /* check source WWN for a match */ 9098 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { 9099 wwn1 = wwn_to_u64(vha->port_name); 9100 wwn2 = wwn_to_u64(pri_entry->src_wwpn); 9101 if (wwn2 == (uint64_t)-1) 9102 wwn_match++; 9103 else if (wwn1 == wwn2) 9104 wwn_match++; 9105 } 9106 9107 /* check destination WWN for a match */ 9108 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { 9109 wwn1 = wwn_to_u64(fcport->port_name); 9110 wwn2 = wwn_to_u64(pri_entry->dst_wwpn); 9111 if (wwn2 == (uint64_t)-1) 9112 wwn_match++; 9113 else if (wwn1 == wwn2) 9114 wwn_match++; 9115 } 9116 
9117 if (pid_match == 2 || wwn_match == 2) { 9118 /* Found a matching entry */ 9119 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 9120 priority = pri_entry->tag; 9121 break; 9122 } 9123 9124 pri_entry++; 9125 } 9126 9127 return priority; 9128 } 9129 9130 /* 9131 * qla24xx_update_fcport_fcp_prio 9132 * Activates fcp priority for the logged in fc port 9133 * 9134 * Input: 9135 * vha = scsi host structure pointer. 9136 * fcp = port structure pointer. 9137 * 9138 * Return: 9139 * QLA_SUCCESS or QLA_FUNCTION_FAILED 9140 * 9141 * Context: 9142 * Kernel context. 9143 */ 9144 int 9145 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 9146 { 9147 int ret; 9148 int priority; 9149 uint16_t mb[5]; 9150 9151 if (fcport->port_type != FCT_TARGET || 9152 fcport->loop_id == FC_NO_LOOP_ID) 9153 return QLA_FUNCTION_FAILED; 9154 9155 priority = qla24xx_get_fcp_prio(vha, fcport); 9156 if (priority < 0) 9157 return QLA_FUNCTION_FAILED; 9158 9159 if (IS_P3P_TYPE(vha->hw)) { 9160 fcport->fcp_prio = priority & 0xf; 9161 return QLA_SUCCESS; 9162 } 9163 9164 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 9165 if (ret == QLA_SUCCESS) { 9166 if (fcport->fcp_prio != priority) 9167 ql_dbg(ql_dbg_user, vha, 0x709e, 9168 "Updated FCP_CMND priority - value=%d loop_id=%d " 9169 "port_id=%02x%02x%02x.\n", priority, 9170 fcport->loop_id, fcport->d_id.b.domain, 9171 fcport->d_id.b.area, fcport->d_id.b.al_pa); 9172 fcport->fcp_prio = priority & 0xf; 9173 } else 9174 ql_dbg(ql_dbg_user, vha, 0x704f, 9175 "Unable to update FCP_CMND priority - ret=0x%x for " 9176 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, 9177 fcport->d_id.b.domain, fcport->d_id.b.area, 9178 fcport->d_id.b.al_pa); 9179 return ret; 9180 } 9181 9182 /* 9183 * qla24xx_update_all_fcp_prio 9184 * Activates fcp priority for all the logged in ports 9185 * 9186 * Input: 9187 * ha = adapter block pointer. 
9188 * 9189 * Return: 9190 * QLA_SUCCESS or QLA_FUNCTION_FAILED 9191 * 9192 * Context: 9193 * Kernel context. 9194 */ 9195 int 9196 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) 9197 { 9198 int ret; 9199 fc_port_t *fcport; 9200 9201 ret = QLA_FUNCTION_FAILED; 9202 /* We need to set priority for all logged in ports */ 9203 list_for_each_entry(fcport, &vha->vp_fcports, list) 9204 ret = qla24xx_update_fcport_fcp_prio(vha, fcport); 9205 9206 return ret; 9207 } 9208 9209 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, 9210 int vp_idx, bool startqp) 9211 { 9212 int rsp_id = 0; 9213 int req_id = 0; 9214 int i; 9215 struct qla_hw_data *ha = vha->hw; 9216 uint16_t qpair_id = 0; 9217 struct qla_qpair *qpair = NULL; 9218 struct qla_msix_entry *msix; 9219 9220 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { 9221 ql_log(ql_log_warn, vha, 0x00181, 9222 "FW/Driver is not multi-queue capable.\n"); 9223 return NULL; 9224 } 9225 9226 if (ql2xmqsupport || ql2xnvmeenable) { 9227 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 9228 if (qpair == NULL) { 9229 ql_log(ql_log_warn, vha, 0x0182, 9230 "Failed to allocate memory for queue pair.\n"); 9231 return NULL; 9232 } 9233 9234 qpair->hw = vha->hw; 9235 qpair->vha = vha; 9236 qpair->qp_lock_ptr = &qpair->qp_lock; 9237 spin_lock_init(&qpair->qp_lock); 9238 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; 9239 9240 /* Assign available que pair id */ 9241 mutex_lock(&ha->mq_lock); 9242 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 9243 if (ha->num_qpairs >= ha->max_qpairs) { 9244 mutex_unlock(&ha->mq_lock); 9245 ql_log(ql_log_warn, vha, 0x0183, 9246 "No resources to create additional q pair.\n"); 9247 goto fail_qid_map; 9248 } 9249 ha->num_qpairs++; 9250 set_bit(qpair_id, ha->qpair_qid_map); 9251 ha->queue_pair_map[qpair_id] = qpair; 9252 qpair->id = qpair_id; 9253 qpair->vp_idx = vp_idx; 9254 qpair->fw_started = ha->flags.fw_started; 9255 INIT_LIST_HEAD(&qpair->hints_list); 9256 qpair->chip_reset = ha->base_qpair->chip_reset; 9257 qpair->enable_class_2 = ha->base_qpair->enable_class_2; 9258 qpair->enable_explicit_conf = 9259 ha->base_qpair->enable_explicit_conf; 9260 9261 for (i = 0; i < ha->msix_count; i++) { 9262 msix = &ha->msix_entries[i]; 9263 if (msix->in_use) 9264 continue; 9265 qpair->msix = msix; 9266 ql_dbg(ql_dbg_multiq, vha, 0xc00f, 9267 "Vector %x selected for qpair\n", msix->vector); 9268 break; 9269 } 9270 if (!qpair->msix) { 9271 ql_log(ql_log_warn, vha, 0x0184, 9272 "Out of MSI-X vectors!.\n"); 9273 goto fail_msix; 9274 } 9275 9276 qpair->msix->in_use = 1; 9277 list_add_tail(&qpair->qp_list_elem, &vha->qp_list); 9278 qpair->pdev = ha->pdev; 9279 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) 9280 qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 9281 9282 mutex_unlock(&ha->mq_lock); 9283 9284 /* Create response queue first */ 9285 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); 9286 if (!rsp_id) { 9287 ql_log(ql_log_warn, vha, 0x0185, 9288 "Failed to create response queue.\n"); 9289 goto fail_rsp; 9290 } 9291 9292 qpair->rsp = ha->rsp_q_map[rsp_id]; 9293 9294 /* Create request queue */ 9295 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, 9296 startqp); 9297 if (!req_id) { 9298 ql_log(ql_log_warn, vha, 0x0186, 9299 "Failed to create request queue.\n"); 9300 goto fail_req; 9301 } 9302 
9303 qpair->req = ha->req_q_map[req_id]; 9304 qpair->rsp->req = qpair->req; 9305 qpair->rsp->qpair = qpair; 9306 /* init qpair to this cpu. Will adjust at run time. */ 9307 qla_cpu_update(qpair, smp_processor_id()); 9308 9309 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 9310 if (ha->fw_attributes & BIT_4) 9311 qpair->difdix_supported = 1; 9312 } 9313 9314 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 9315 if (!qpair->srb_mempool) { 9316 ql_log(ql_log_warn, vha, 0xd036, 9317 "Failed to create srb mempool for qpair %d\n", 9318 qpair->id); 9319 goto fail_mempool; 9320 } 9321 9322 /* Mark as online */ 9323 qpair->online = 1; 9324 9325 if (!vha->flags.qpairs_available) 9326 vha->flags.qpairs_available = 1; 9327 9328 ql_dbg(ql_dbg_multiq, vha, 0xc00d, 9329 "Request/Response queue pair created, id %d\n", 9330 qpair->id); 9331 ql_dbg(ql_dbg_init, vha, 0x0187, 9332 "Request/Response queue pair created, id %d\n", 9333 qpair->id); 9334 } 9335 return qpair; 9336 9337 fail_mempool: 9338 fail_req: 9339 qla25xx_delete_rsp_que(vha, qpair->rsp); 9340 fail_rsp: 9341 mutex_lock(&ha->mq_lock); 9342 qpair->msix->in_use = 0; 9343 list_del(&qpair->qp_list_elem); 9344 if (list_empty(&vha->qp_list)) 9345 vha->flags.qpairs_available = 0; 9346 fail_msix: 9347 ha->queue_pair_map[qpair_id] = NULL; 9348 clear_bit(qpair_id, ha->qpair_qid_map); 9349 ha->num_qpairs--; 9350 mutex_unlock(&ha->mq_lock); 9351 fail_qid_map: 9352 kfree(qpair); 9353 return NULL; 9354 } 9355 9356 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) 9357 { 9358 int ret = QLA_FUNCTION_FAILED; 9359 struct qla_hw_data *ha = qpair->hw; 9360 9361 qpair->delete_in_progress = 1; 9362 9363 ret = qla25xx_delete_req_que(vha, qpair->req); 9364 if (ret != QLA_SUCCESS) 9365 goto fail; 9366 9367 ret = qla25xx_delete_rsp_que(vha, qpair->rsp); 9368 if (ret != QLA_SUCCESS) 9369 goto fail; 9370 9371 mutex_lock(&ha->mq_lock); 9372 ha->queue_pair_map[qpair->id] = NULL; 9373 
clear_bit(qpair->id, ha->qpair_qid_map); 9374 ha->num_qpairs--; 9375 list_del(&qpair->qp_list_elem); 9376 if (list_empty(&vha->qp_list)) { 9377 vha->flags.qpairs_available = 0; 9378 vha->flags.qpairs_req_created = 0; 9379 vha->flags.qpairs_rsp_created = 0; 9380 } 9381 mempool_destroy(qpair->srb_mempool); 9382 kfree(qpair); 9383 mutex_unlock(&ha->mq_lock); 9384 9385 return QLA_SUCCESS; 9386 fail: 9387 return ret; 9388 } 9389 9390 uint64_t 9391 qla2x00_count_set_bits(uint32_t num) 9392 { 9393 /* Brian Kernighan's Algorithm */ 9394 u64 count = 0; 9395 9396 while (num) { 9397 num &= (num - 1); 9398 count++; 9399 } 9400 return count; 9401 } 9402 9403 uint64_t 9404 qla2x00_get_num_tgts(scsi_qla_host_t *vha) 9405 { 9406 fc_port_t *f, *tf; 9407 u64 count = 0; 9408 9409 f = NULL; 9410 tf = NULL; 9411 9412 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 9413 if (f->port_type != FCT_TARGET) 9414 continue; 9415 count++; 9416 } 9417 return count; 9418 } 9419 9420 int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags) 9421 { 9422 scsi_qla_host_t *vha = shost_priv(host); 9423 fc_port_t *fcport = NULL; 9424 unsigned long int_flags; 9425 9426 if (flags & QLA2XX_HW_ERROR) 9427 vha->hw_err_cnt = 0; 9428 if (flags & QLA2XX_SHT_LNK_DWN) 9429 vha->short_link_down_cnt = 0; 9430 if (flags & QLA2XX_INT_ERR) 9431 vha->interface_err_cnt = 0; 9432 if (flags & QLA2XX_CMD_TIMEOUT) 9433 vha->cmd_timeout_cnt = 0; 9434 if (flags & QLA2XX_RESET_CMD_ERR) 9435 vha->reset_cmd_err_cnt = 0; 9436 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { 9437 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); 9438 list_for_each_entry(fcport, &vha->vp_fcports, list) { 9439 fcport->tgt_short_link_down_cnt = 0; 9440 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 9441 } 9442 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); 9443 } 9444 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 9445 return 0; 9446 } 9447 9448 int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags) 9449 { 9450 return 
qla2xxx_reset_stats(host, flags); 9451 } 9452 9453 int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags) 9454 { 9455 return qla2xxx_reset_stats(host, flags); 9456 } 9457 9458 int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags, 9459 void *data, u64 size) 9460 { 9461 scsi_qla_host_t *vha = shost_priv(host); 9462 struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data; 9463 struct ql_vnd_stats *rsp_data = &resp->stats; 9464 u64 ini_entry_count = 0; 9465 u64 i = 0; 9466 u64 entry_count = 0; 9467 u64 num_tgt = 0; 9468 u32 tmp_stat_type = 0; 9469 fc_port_t *fcport = NULL; 9470 unsigned long int_flags; 9471 9472 /* Copy stat type to work on it */ 9473 tmp_stat_type = flags; 9474 9475 if (tmp_stat_type & BIT_17) { 9476 num_tgt = qla2x00_get_num_tgts(vha); 9477 /* unset BIT_17 */ 9478 tmp_stat_type &= ~(1 << 17); 9479 } 9480 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); 9481 9482 entry_count = ini_entry_count + num_tgt; 9483 9484 rsp_data->entry_count = entry_count; 9485 9486 i = 0; 9487 if (flags & QLA2XX_HW_ERROR) { 9488 rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR; 9489 rsp_data->entry[i].tgt_num = 0x0; 9490 rsp_data->entry[i].cnt = vha->hw_err_cnt; 9491 i++; 9492 } 9493 9494 if (flags & QLA2XX_SHT_LNK_DWN) { 9495 rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN; 9496 rsp_data->entry[i].tgt_num = 0x0; 9497 rsp_data->entry[i].cnt = vha->short_link_down_cnt; 9498 i++; 9499 } 9500 9501 if (flags & QLA2XX_INT_ERR) { 9502 rsp_data->entry[i].stat_type = QLA2XX_INT_ERR; 9503 rsp_data->entry[i].tgt_num = 0x0; 9504 rsp_data->entry[i].cnt = vha->interface_err_cnt; 9505 i++; 9506 } 9507 9508 if (flags & QLA2XX_CMD_TIMEOUT) { 9509 rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT; 9510 rsp_data->entry[i].tgt_num = 0x0; 9511 rsp_data->entry[i].cnt = vha->cmd_timeout_cnt; 9512 i++; 9513 } 9514 9515 if (flags & QLA2XX_RESET_CMD_ERR) { 9516 rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR; 9517 rsp_data->entry[i].tgt_num = 0x0; 9518 
rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt; 9519 i++; 9520 } 9521 9522 /* i will continue from previous loop, as target 9523 * entries are after initiator 9524 */ 9525 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { 9526 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); 9527 list_for_each_entry(fcport, &vha->vp_fcports, list) { 9528 if (fcport->port_type != FCT_TARGET) 9529 continue; 9530 if (!fcport->rport) 9531 continue; 9532 rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN; 9533 rsp_data->entry[i].tgt_num = fcport->rport->number; 9534 rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt; 9535 i++; 9536 } 9537 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); 9538 } 9539 resp->status = EXT_STATUS_OK; 9540 9541 return 0; 9542 } 9543 9544 int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags, 9545 struct fc_rport *rport, void *data, u64 size) 9546 { 9547 struct ql_vnd_tgt_stats_resp *tgt_data = data; 9548 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 9549 9550 tgt_data->status = 0; 9551 tgt_data->stats.entry_count = 1; 9552 tgt_data->stats.entry[0].stat_type = flags; 9553 tgt_data->stats.entry[0].tgt_num = rport->number; 9554 tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt; 9555 9556 return 0; 9557 } 9558 9559 int qla2xxx_disable_port(struct Scsi_Host *host) 9560 { 9561 scsi_qla_host_t *vha = shost_priv(host); 9562 9563 vha->hw->flags.port_isolated = 1; 9564 9565 if (qla2x00_chip_is_down(vha)) 9566 return 0; 9567 9568 if (vha->flags.online) { 9569 qla2x00_abort_isp_cleanup(vha); 9570 qla2x00_wait_for_sess_deletion(vha); 9571 } 9572 9573 return 0; 9574 } 9575 9576 int qla2xxx_enable_port(struct Scsi_Host *host) 9577 { 9578 scsi_qla_host_t *vha = shost_priv(host); 9579 9580 vha->hw->flags.port_isolated = 0; 9581 /* Set the flag to 1, so that isp_abort can proceed */ 9582 vha->flags.online = 1; 9583 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 9584 qla2xxx_wake_dpc(vha); 9585 9586 return 0; 9587 } 9588