// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
    struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * Timer expiry entry point for an srb: recover the owning srb from the
 * embedded timer and dispatch to the per-IOCB ->timeout() handler that was
 * installed when the srb was set up.  Runs in timer (softirq) context, so
 * IRQs must not be disabled here.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;

	WARN_ON(irqs_disabled());
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
}

/* Stop the srb's IOCB timer and return the srb to its pool. */
void qla2x00_sp_free(srb_t *sp)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/*
 * Poisoned ->done() callback: installed on already-released srbs so that a
 * stray completion of a freed srb is loudly reported instead of silently
 * corrupting memory.
 */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}

/* Poisoned ->free() callback: catches a double free of an srb. */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * Return the timeout, in seconds, to arm on asynchronous logio-type
 * commands: normally 2 x R_A_TOV as negotiated with the switch.  FX00
 * adapters use a fixed default, and pre-FWI2 ISPs use the login timeout
 * seeded from the initialization control block.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * Timeout handler for an ABTS srb.  Under the qpair lock, remove both the
 * original command (sp->cmd_sp) and the abort srb itself from the
 * outstanding-commands array so the ISR can no longer complete them, then
 * complete both with QLA_OS_TIMER_EXPIRED.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	/* Handle 0 is reserved; scan the rest of the outstanding array. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp))
			qpair->req->outstanding_cmds[handle] = NULL;

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (sp->cmd_sp)
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);

	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
}

/*
 * Completion callback for an ABTS srb.  Waits for the NVMe layer to drop
 * its command reference on the aborted command (if any), stops the srb
 * timer, then either wakes a synchronous waiter (SRB_WAKEUP_ON_COMP) or
 * frees the srb itself.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	srb_t *orig_sp = sp->cmd_sp;

	if (orig_sp)
		qla_wait_nvme_release_cmd_kref(orig_sp);

	del_timer(&sp->u.iocb_cmd.timer);
	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&abt->u.abt.comp);
	else
		sp->free(sp);
}

/*
 * Issue an asynchronous ABTS for @cmd_sp on the same qpair.  When @wait is
 * true the caller blocks until the abort completes and the result reflects
 * the abort's completion status; otherwise the abort srb cleans itself up
 * via qla24xx_abort_sp_done().
 */
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* GFP_ATOMIC: may be called from timeout/IRQ-adjacent context. */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return rval;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_timer(sp, 42);

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	sp->done = qla24xx_abort_sp_done;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return rval;
	}

	if (wait) {
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_FUNCTION_FAILED;
		sp->free(sp);
	}

	return rval;
}

/*
 * Generic timeout handler for asynchronous logio-style IOCBs (login,
 * logout, CT passthrough, mailbox, NACKs, ...).  First tries to abort the
 * command in firmware; only if the abort cannot even be issued does it
 * manually pull the srb out of the outstanding-commands array (under the
 * qpair lock) and complete it with QLA_FUNCTION_TIMEOUT.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Retry as needed. */
			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
			lio->u.logio.data[1] =
				lio->u.logio.flags & SRB_LOGIN_RETRIED ?
				QLA_LOGIO_LOGIN_RETRIED : 0;
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
	default:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	}
}

/*
 * Completion callback for an async PLOGI srb: clear the in-flight flags
 * on the fcport and feed the result into the PLOGI-done discovery state
 * machine (skipped while the driver is unloading), then free the srb.
 */
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	sp->free(sp);
}

/*
 * WWPN ordering helpers used in N2N (point-to-point) topology to decide
 * which side initiates login: true when the remote port's WWPN is smaller
 * than the local adapter's.
 */
static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
		wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}

/*
 * Kick off an asynchronous PLOGI to @fcport.
 *
 * Refuses to send when the HBA is offline, a login is already in flight
 * (FCF_ASYNC_SENT), or no loop id is assigned.  In N2N topology with a
 * bigger remote WWPN only the PRLI is performed (the remote side drives
 * the PLOGI); with EDIF active the FC-SP (secure login) flags are set.
 * Returns a qla2x00 status code; on dispatch failure a relogin is
 * scheduled via the dpc thread.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so stale completions can be detected. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    vha->e_dbell.db_flags & EDB_ACTIVE) {
			lio->u.logio.flags |=
				(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
			ql_dbg(ql_dbg_disc, vha, 0x2072,
			    "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
			    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
		} else {
			lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
		}
	}

	/* NVMe targets get PRLI via the NVMe path, not here. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_log(ql_log_warn, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/*
 * Completion callback for an async LOGO srb: clear in-flight flags, bump
 * the login generation so stale logins are rejected, and notify the
 * target-mode logo completion handler.
 */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
	sp->free(sp);
}

/* Issue an asynchronous LOGO (fabric logout) for @fcport. */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name, fcport->explicit_logout);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}

/*
 * PRLO post-processing run from the work queue: mark the device lost (so
 * initiator mode relogs in later) unless a target-mode session exists,
 * then run the logo completion handler with the firmware status.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}

/*
 * Completion callback for an async PRLO srb: defer the heavier
 * post-processing (qla2x00_async_prlo_done) to the work queue unless the
 * driver is unloading.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

/* Issue an asynchronous PRLO (process logout) for @fcport. */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prlo_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/*
 * Handle completion of an ADISC exchange.  A firmware failure schedules
 * the session for deletion (with forced firmware cleanup); stale results
 * (login/RSCN generation changed while the command was in flight) are
 * dropped or replayed; otherwise processing continues as a GPDB event.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived mid-flight: replay it and re-discover. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

/* Queue an ELS PLOGI work item for @fcport and mark login pending. */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, e);
}

/*
 * Completion callback for an async ADISC srb: package the mailbox/IOP
 * results into an event_arg and hand off to qla24xx_handle_adisc_event().
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla24xx_handle_adisc_event(vha, &ea);

	sp->free(sp);
}

/*
 * Issue an asynchronous ADISC (address discovery) to revalidate @fcport.
 * On any failure path the ADISC is re-posted via the work queue so the
 * discovery state machine keeps making progress.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generations so stale completions can be detected. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

/*
 * Return true when @loop_id is reserved and must not be handed out:
 * beyond the last nport handle on FWI2 parts, or one of the well-known
 * loop-mode handles (management server, broadcast) on earlier ISPs.
 */
static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return loop_id > NPH_LAST_HANDLE;

	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
		loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}

/**
 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
 * @vha: adapter state pointer.
 * @dev: port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
645 */ 646 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 647 { 648 int rval; 649 struct qla_hw_data *ha = vha->hw; 650 unsigned long flags = 0; 651 652 rval = QLA_SUCCESS; 653 654 spin_lock_irqsave(&ha->vport_slock, flags); 655 656 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE); 657 if (dev->loop_id >= LOOPID_MAP_SIZE || 658 qla2x00_is_reserved_id(vha, dev->loop_id)) { 659 dev->loop_id = FC_NO_LOOP_ID; 660 rval = QLA_FUNCTION_FAILED; 661 } else { 662 set_bit(dev->loop_id, ha->loop_id_map); 663 } 664 spin_unlock_irqrestore(&ha->vport_slock, flags); 665 666 if (rval == QLA_SUCCESS) 667 ql_dbg(ql_dbg_disc, dev->vha, 0x2086, 668 "Assigning new loopid=%x, portid=%x.\n", 669 dev->loop_id, dev->d_id.b24); 670 else 671 ql_log(ql_log_warn, dev->vha, 0x2087, 672 "No loop_id's available, portid=%x.\n", 673 dev->d_id.b24); 674 675 return rval; 676 } 677 678 void qla2x00_clear_loop_id(fc_port_t *fcport) 679 { 680 struct qla_hw_data *ha = fcport->vha->hw; 681 682 if (fcport->loop_id == FC_NO_LOOP_ID || 683 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id)) 684 return; 685 686 clear_bit(fcport->loop_id, ha->loop_id_map); 687 fcport->loop_id = FC_NO_LOOP_ID; 688 } 689 690 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, 691 struct event_arg *ea) 692 { 693 fc_port_t *fcport, *conflict_fcport; 694 struct get_name_list_extended *e; 695 u16 i, n, found = 0, loop_id; 696 port_id_t id; 697 u64 wwn; 698 u16 data[2]; 699 u8 current_login_state, nvme_cls; 700 701 fcport = ea->fcport; 702 ql_dbg(ql_dbg_disc, vha, 0xffff, 703 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n", 704 __func__, fcport->port_name, fcport->disc_state, 705 fcport->fw_login_state, ea->rc, 706 fcport->login_gen, fcport->last_login_gen, 707 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable); 708 709 if (fcport->disc_state == DSC_DELETE_PEND) 710 return; 711 712 if (ea->rc) { /* rval */ 713 if 
(fcport->login_retry == 0) { 714 ql_dbg(ql_dbg_disc, vha, 0x20de, 715 "GNL failed Port login retry %8phN, retry cnt=%d.\n", 716 fcport->port_name, fcport->login_retry); 717 } 718 return; 719 } 720 721 if (fcport->last_rscn_gen != fcport->rscn_gen) { 722 qla_rscn_replay(fcport); 723 qlt_schedule_sess_for_deletion(fcport); 724 return; 725 } else if (fcport->last_login_gen != fcport->login_gen) { 726 ql_dbg(ql_dbg_disc, vha, 0x20e0, 727 "%s %8phC login gen changed\n", 728 __func__, fcport->port_name); 729 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 730 return; 731 } 732 733 n = ea->data[0] / sizeof(struct get_name_list_extended); 734 735 ql_dbg(ql_dbg_disc, vha, 0x20e1, 736 "%s %d %8phC n %d %02x%02x%02x lid %d \n", 737 __func__, __LINE__, fcport->port_name, n, 738 fcport->d_id.b.domain, fcport->d_id.b.area, 739 fcport->d_id.b.al_pa, fcport->loop_id); 740 741 for (i = 0; i < n; i++) { 742 e = &vha->gnl.l[i]; 743 wwn = wwn_to_u64(e->port_name); 744 id.b.domain = e->port_id[2]; 745 id.b.area = e->port_id[1]; 746 id.b.al_pa = e->port_id[0]; 747 id.b.rsvd_1 = 0; 748 749 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) 750 continue; 751 752 if (IS_SW_RESV_ADDR(id)) 753 continue; 754 755 found = 1; 756 757 loop_id = le16_to_cpu(e->nport_handle); 758 loop_id = (loop_id & 0x7fff); 759 nvme_cls = e->current_login_state >> 4; 760 current_login_state = e->current_login_state & 0xf; 761 762 if (PRLI_PHASE(nvme_cls)) { 763 current_login_state = nvme_cls; 764 fcport->fc4_type &= ~FS_FC4TYPE_FCP; 765 fcport->fc4_type |= FS_FC4TYPE_NVME; 766 } else if (PRLI_PHASE(current_login_state)) { 767 fcport->fc4_type |= FS_FC4TYPE_FCP; 768 fcport->fc4_type &= ~FS_FC4TYPE_NVME; 769 } 770 771 ql_dbg(ql_dbg_disc, vha, 0x20e2, 772 "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", 773 __func__, fcport->port_name, 774 e->current_login_state, fcport->fw_login_state, 775 fcport->fc4_type, id.b24, fcport->d_id.b24, 776 loop_id, fcport->loop_id); 777 778 switch (fcport->disc_state) 
{ 779 case DSC_DELETE_PEND: 780 case DSC_DELETED: 781 break; 782 default: 783 if ((id.b24 != fcport->d_id.b24 && 784 fcport->d_id.b24 && 785 fcport->loop_id != FC_NO_LOOP_ID) || 786 (fcport->loop_id != FC_NO_LOOP_ID && 787 fcport->loop_id != loop_id)) { 788 ql_dbg(ql_dbg_disc, vha, 0x20e3, 789 "%s %d %8phC post del sess\n", 790 __func__, __LINE__, fcport->port_name); 791 if (fcport->n2n_flag) 792 fcport->d_id.b24 = 0; 793 qlt_schedule_sess_for_deletion(fcport); 794 return; 795 } 796 break; 797 } 798 799 fcport->loop_id = loop_id; 800 if (fcport->n2n_flag) 801 fcport->d_id.b24 = id.b24; 802 803 wwn = wwn_to_u64(fcport->port_name); 804 qlt_find_sess_invalidate_other(vha, wwn, 805 id, loop_id, &conflict_fcport); 806 807 if (conflict_fcport) { 808 /* 809 * Another share fcport share the same loop_id & 810 * nport id. Conflict fcport needs to finish 811 * cleanup before this fcport can proceed to login. 812 */ 813 conflict_fcport->conflict = fcport; 814 fcport->login_pause = 1; 815 } 816 817 switch (vha->hw->current_topology) { 818 default: 819 switch (current_login_state) { 820 case DSC_LS_PRLI_COMP: 821 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 822 vha, 0x20e4, "%s %d %8phC post gpdb\n", 823 __func__, __LINE__, fcport->port_name); 824 825 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 826 fcport->port_type = FCT_INITIATOR; 827 else 828 fcport->port_type = FCT_TARGET; 829 data[0] = data[1] = 0; 830 qla2x00_post_async_adisc_work(vha, fcport, 831 data); 832 break; 833 case DSC_LS_PLOGI_COMP: 834 if (vha->hw->flags.edif_enabled) { 835 /* check to see if App support Secure */ 836 qla24xx_post_gpdb_work(vha, fcport, 0); 837 break; 838 } 839 fallthrough; 840 case DSC_LS_PORT_UNAVAIL: 841 default: 842 if (fcport->loop_id == FC_NO_LOOP_ID) { 843 qla2x00_find_new_loop_id(vha, fcport); 844 fcport->fw_login_state = 845 DSC_LS_PORT_UNAVAIL; 846 } 847 ql_dbg(ql_dbg_disc, vha, 0x20e5, 848 "%s %d %8phC\n", __func__, __LINE__, 849 fcport->port_name); 850 qla24xx_fcport_handle_login(vha, 
fcport); 851 break; 852 } 853 break; 854 case ISP_CFG_N: 855 fcport->fw_login_state = current_login_state; 856 fcport->d_id = id; 857 switch (current_login_state) { 858 case DSC_LS_PRLI_PEND: 859 /* 860 * In the middle of PRLI. Let it finish. 861 * Allow relogin code to recheck state again 862 * with GNL. Push disc_state back to DELETED 863 * so GNL can go out again 864 */ 865 qla2x00_set_fcport_disc_state(fcport, 866 DSC_DELETED); 867 break; 868 case DSC_LS_PRLI_COMP: 869 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 870 fcport->port_type = FCT_INITIATOR; 871 else 872 fcport->port_type = FCT_TARGET; 873 874 data[0] = data[1] = 0; 875 qla2x00_post_async_adisc_work(vha, fcport, 876 data); 877 break; 878 case DSC_LS_PLOGI_COMP: 879 if (fcport_is_bigger(fcport)) { 880 /* local adapter is smaller */ 881 if (fcport->loop_id != FC_NO_LOOP_ID) 882 qla2x00_clear_loop_id(fcport); 883 884 fcport->loop_id = loop_id; 885 qla24xx_fcport_handle_login(vha, 886 fcport); 887 break; 888 } 889 fallthrough; 890 default: 891 if (fcport_is_smaller(fcport)) { 892 /* local adapter is bigger */ 893 if (fcport->loop_id != FC_NO_LOOP_ID) 894 qla2x00_clear_loop_id(fcport); 895 896 fcport->loop_id = loop_id; 897 qla24xx_fcport_handle_login(vha, 898 fcport); 899 } 900 break; 901 } 902 break; 903 } /* switch (ha->current_topology) */ 904 } 905 906 if (!found) { 907 switch (vha->hw->current_topology) { 908 case ISP_CFG_F: 909 case ISP_CFG_FL: 910 for (i = 0; i < n; i++) { 911 e = &vha->gnl.l[i]; 912 id.b.domain = e->port_id[0]; 913 id.b.area = e->port_id[1]; 914 id.b.al_pa = e->port_id[2]; 915 id.b.rsvd_1 = 0; 916 loop_id = le16_to_cpu(e->nport_handle); 917 918 if (fcport->d_id.b24 == id.b24) { 919 conflict_fcport = 920 qla2x00_find_fcport_by_wwpn(vha, 921 e->port_name, 0); 922 if (conflict_fcport) { 923 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 924 vha, 0x20e5, 925 "%s %d %8phC post del sess\n", 926 __func__, __LINE__, 927 conflict_fcport->port_name); 928 qlt_schedule_sess_for_deletion 929 
(conflict_fcport); 930 } 931 } 932 /* 933 * FW already picked this loop id for 934 * another fcport 935 */ 936 if (fcport->loop_id == loop_id) 937 fcport->loop_id = FC_NO_LOOP_ID; 938 } 939 qla24xx_fcport_handle_login(vha, fcport); 940 break; 941 case ISP_CFG_N: 942 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); 943 if (time_after_eq(jiffies, fcport->dm_login_expire)) { 944 if (fcport->n2n_link_reset_cnt < 2) { 945 fcport->n2n_link_reset_cnt++; 946 /* 947 * remote port is not sending PLOGI. 948 * Reset link to kick start his state 949 * machine 950 */ 951 set_bit(N2N_LINK_RESET, 952 &vha->dpc_flags); 953 } else { 954 if (fcport->n2n_chip_reset < 1) { 955 ql_log(ql_log_info, vha, 0x705d, 956 "Chip reset to bring laser down"); 957 set_bit(ISP_ABORT_NEEDED, 958 &vha->dpc_flags); 959 fcport->n2n_chip_reset++; 960 } else { 961 ql_log(ql_log_info, vha, 0x705d, 962 "Remote port %8ph is not coming back\n", 963 fcport->port_name); 964 fcport->scan_state = 0; 965 } 966 } 967 qla2xxx_wake_dpc(vha); 968 } else { 969 /* 970 * report port suppose to do PLOGI. Give him 971 * more time. FW will catch it. 
972 */ 973 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 974 } 975 break; 976 default: 977 break; 978 } 979 } 980 } /* gnl_event */ 981 982 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) 983 { 984 struct scsi_qla_host *vha = sp->vha; 985 unsigned long flags; 986 struct fc_port *fcport = NULL, *tf; 987 u16 i, n = 0, loop_id; 988 struct event_arg ea; 989 struct get_name_list_extended *e; 990 u64 wwn; 991 struct list_head h; 992 bool found = false; 993 994 ql_dbg(ql_dbg_disc, vha, 0x20e7, 995 "Async done-%s res %x mb[1]=%x mb[2]=%x \n", 996 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], 997 sp->u.iocb_cmd.u.mbx.in_mb[2]); 998 999 if (res == QLA_FUNCTION_TIMEOUT) 1000 return; 1001 1002 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 1003 memset(&ea, 0, sizeof(ea)); 1004 ea.sp = sp; 1005 ea.rc = res; 1006 1007 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= 1008 sizeof(struct get_name_list_extended)) { 1009 n = sp->u.iocb_cmd.u.mbx.in_mb[1] / 1010 sizeof(struct get_name_list_extended); 1011 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ 1012 } 1013 1014 for (i = 0; i < n; i++) { 1015 e = &vha->gnl.l[i]; 1016 loop_id = le16_to_cpu(e->nport_handle); 1017 /* mask out reserve bit */ 1018 loop_id = (loop_id & 0x7fff); 1019 set_bit(loop_id, vha->hw->loop_id_map); 1020 wwn = wwn_to_u64(e->port_name); 1021 1022 ql_dbg(ql_dbg_disc, vha, 0x20e8, 1023 "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", 1024 __func__, &wwn, e->port_id[2], e->port_id[1], 1025 e->port_id[0], e->current_login_state, e->last_login_state, 1026 (loop_id & 0x7fff)); 1027 } 1028 1029 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1030 1031 INIT_LIST_HEAD(&h); 1032 fcport = tf = NULL; 1033 if (!list_empty(&vha->gnl.fcports)) 1034 list_splice_init(&vha->gnl.fcports, &h); 1035 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1036 1037 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { 1038 list_del_init(&fcport->gnl_entry); 1039 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 
		/*
		 * (Continuation of qla24xx_async_gnl_sp_done() — the handler
		 * begins before this chunk.)  For each waiting fcport: clear
		 * the async flags under the session lock, then dispatch the
		 * GNL-done event.
		 */
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		/* Check whether this firmware entry matches a known fcport. */
		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		/* port_id bytes arrive little-endian from the firmware list. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		/* Unknown, non-reserved address with a valid WWPN: new session. */
		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	/*
	 * Only one GNL command is in flight at a time (gnl.sent).  If more
	 * fcports queued up while this one ran, re-issue GNL for the next.
	 */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

/*
 * qla24xx_async_gnl - issue an async MBC_PORT_NODE_NAME_LIST (get name list)
 * mailbox command for @fcport.
 *
 * The fcport is queued on vha->gnl.fcports; if a GNL command is already in
 * flight (vha->gnl.sent) the fcport simply waits for that completion.
 * Returns QLA_SUCCESS if the request was issued or queued, else an error.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	/* Mark busy and snapshot generation counters under the session lock. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already outstanding; piggy-back on it. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	/* Generations are checked at completion to detect stale results. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* Mailbox registers: DMA address of the pre-allocated gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
	return rval;
}

/*
 * qla24xx_post_gnl_work - queue a QLA_EVT_GNL work item for @fcport so the
 * GNL command is issued from the DPC/work-queue context.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb_sp_done - completion handler for the async
 * MBC_GET_PORT_DATABASE command.  On timeout the event is not processed,
 * but the port-database DMA buffer and the srb are always released.
 */
static void
qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	/* Return the port database buffer allocated in qla24xx_async_gpdb(). */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/*
 * qla24xx_post_prli_work - queue a QLA_EVT_PRLI work item for @fcport.
 * Pure target mode never initiates PRLI, so it is rejected up front.
 */
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_prli_sp_done - completion handler for an async PRLI.
 * Copies the login IOCB status/IO parameters into an event_arg and runs
 * the PRLI-done state machine, unless the driver is unloading.
 */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla24xx_async_prli - issue an async PRLI (process login) to @fcport.
 * (Signature continues in the next chunk.)
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t
    *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* In dual mode, defer while the firmware login state is in flux. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	/* NVMe targets get the NVMe-flavored PRLI payload. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; ask the DPC thread to relogin. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla24xx_post_gpdb_work - queue a QLA_EVT_GPDB work item so the
 * get-port-database command is issued from work-queue context.
 * @opt is passed through to the GPDB mailbox command (mb[10]).
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb - issue an async MBC_GET_PORT_DATABASE for @fcport.
 *
 * Allocates a port_database_24xx DMA buffer from the s_dma_pool; the buffer
 * is freed by qla24xx_async_gpdb_sp_done() on success, or here on failure.
 * On any failure path the request is re-queued via qla24xx_post_gpdb_work().
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Generations are checked at completion to detect stale results. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* Mailbox registers: loop id and DMA address of the pd buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	/* Completion handler frees these (see qla24xx_async_gpdb_sp_done). */
	mbx->u.mbx.in = pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Retry the GPDB from work-queue context rather than giving up. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

/*
 * __qla24xx_handle_gpdb_event - finalize a successful port-database read:
 * bump login_gen, mark the session live, and on first successful login
 * schedule the fc_rport registration (upd_fcport).
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Drop the lock around the scheduling call. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

/*
 * qla_chk_secure_login - inspect the port database's secure_login bit and,
 * when EDIF is enabled, route the port either into the authentication-
 * pending path (FC-SP device) or straight to PRLI.
 *
 * Returns 1 if this function took over the next discovery step (caller
 * should stop processing), 0 otherwise.
 */
static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rc = 0;

	if (pd->secure_login) {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "Secure Login established on %8phC\n",
		    fcport->port_name);
		fcport->edif.secured_login = 1;
		fcport->edif.non_secured_login = 0;
		fcport->flags |= FCF_FCSP_DEVICE;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "non-Secure Login %8phC",
		    fcport->port_name);
		fcport->edif.secured_login = 0;
		fcport->edif.non_secured_login = 1;
	}
	if (vha->hw->flags.edif_enabled) {
		if (fcport->flags & FCF_FCSP_DEVICE) {
			qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
			/* Start edif prli timer & ring doorbell for app */
			fcport->edif.rx_sa_set = 0;
			fcport->edif.tx_sa_set = 0;
			fcport->edif.rx_sa_pending = 0;
			fcport->edif.tx_sa_pending = 0;

			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    fcport->d_id.b24);

			if (vha->e_dbell.db_flags == EDB_ACTIVE) {
				ql_dbg(ql_dbg_disc, vha, 0x20ef,
				    "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->edif.app_started = 1;
				fcport->edif.app_sess_online = 1;

				qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
				    fcport->d_id.b24, 0, fcport);
			}

			rc = 1;
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_prli_work(vha, fcport);
			rc = 1;
		}
	}
	return rc;
}

/*
 * qla24xx_handle_gpdb_event - act on a completed get-port-database read.
 * (Signature continues in the next chunk.)
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct
    event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	/* Port database buffer filled in by the GPDB mailbox command. */
	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* NVMe state lives in the high nibble, FCP in the low nibble. */
	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived while GPDB was in flight; replay and re-login. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_COMPLETE:
		if (qla_chk_secure_login(vha, fcport, pd))
			return;
		fallthrough;
	case PDS_PLOGI_PENDING:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

/*
 * qla_chk_n2n_b4_login - decide whether this adapter should initiate the
 * login on an N2N (point-to-point) link, and if so post the async login.
 * On N2N the adapter with the larger WWPN initiates; in pure initiator
 * mode we always initiate.  Pure target mode never initiates.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
			    fcport->plogi_nack_done_deadline))
				/* Remote's PLOGI NACK window has expired. */
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login && fcport->login_retry) {
		fcport->login_retry--;
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

/*
 * qla24xx_fcport_handle_login - main per-fcport discovery state machine.
 * Dispatches on fcport->disc_state and posts the next async step (GNL,
 * GNNID, PLOGI, PRLI, GPDB, ADISC, ...).  Always returns 0.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Dual mode: hold off while the firmware login state is transient. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		/* An async command is outstanding; try again later. */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/*
			 * 0x6 == PRLI complete in the FCP login-state
			 * nibble; fetch the port database in that case.
			 */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
				    vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* Waiting on a conflicting session cleanup. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Warn (once per minute) if rport registration is slow. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}

/*
 * qla24xx_post_newsess_work - queue a QLA_EVT_NEW_SESS work item carrying
 * the new session's port id, WWPN/WWNN and fc4 type.
 * (Body continues in the next chunk.)
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;

	e =
	    qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_handle_rscn - note an RSCN for the affected fcport (bump its
 * rscn_gen and mark it for rescan) and kick the fabric scan work if one
 * is not already queued.
 */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport;
	unsigned long flags;

	fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
	if (fcport) {
		fcport->scan_needed = 1;
		fcport->rscn_gen++;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/*
 * qla24xx_handle_relogin_event - re-drive the login state machine for a
 * port.  If an RSCN arrived since the last pass, refresh via GNL first.
 */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

/*
 * qla_handle_els_plogi_done - after an ELS PLOGI completes, follow up
 * with a PRLI (skipped in pure target mode).
 */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
				      struct event_arg *ea)
{
	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}

/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
		qla2x00_handle_rscn(fcport->vha, &ea);
	}
}

/*
 * qla2x00_tmf_iocb_timeout - timeout handler for a task-management IOCB.
 * Attempts an abort; if the abort cannot be issued, manually pulls the
 * srb out of the outstanding-commands table and completes the TMF with
 * CS_TIMEOUT so the waiter in qla2x00_async_tm_cmd() is released.
 */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	rc = qla24xx_async_abort_cmd(sp, false);
	if (rc) {
		/* Abort failed; reap the handle ourselves under the qp lock. */
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}

/* Completion handler for a TMF srb: wake the synchronous waiter. */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

/*
 * qla2x00_async_tm_cmd - issue a task-management command (e.g. LUN reset)
 * and wait synchronously for its completion, then send a marker IOCB.
 *
 * @fcport: target port.  @flags: TCF_* task management flags.
 * @lun: target LUN.  @tag: opaque tag stored in u.tmf.data.
 * Returns QLA_SUCCESS or an error/completion status.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Block until qla2x00_tmf_sp_done()/timeout completes the TMF. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/*
 * qla24xx_async_abort_command - abort an outstanding srb.  Looks the srb
 * up in its qpair's outstanding-commands table; if absent it has already
 * completed.  FX00 "disc" commands use the dedicated ioctl abort path.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}

/*
 * qla24xx_handle_prli_done_event - act on PRLI completion status.
 * On success, record NVMe PRLI service parameters and fetch the port
 * database.  On failure, handle busy-reject retries and, on N2N links,
 * flip the FC4 type preference and reset the link (up to 3 attempts).
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		/* First-burst size is reported in 512-byte units. */
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		    "FCP" : "NVMe", ea->fcport->fc4_type);

		if (N2N_TOPO(vha->hw)) {
			/* Try the other FC4 protocol on the next attempt. */
			if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
			} else {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
				ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
			}

			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed.
				   Reset link to kick start
				 * state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			if (NVME_FCP_TARGET(ea->fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s prli\n",
				    __func__, __LINE__,
				    ea->fcport->port_name,
				    (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
				    ? "NVMe" : "FCP");
				/* Dual-protocol target: drop the preferred
				 * protocol that just failed. */
				if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
					ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				else
					ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			}

			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}

/*
 * qla24xx_handle_plogi_done_event - act on PLOGI completion status.
 * Validates login/RSCN generations, then on success either proceeds to
 * GPDB (EDIF or FCP) or posts a PRLI (NVMe).  Loop-id/port-id conflicts
 * are resolved by re-running GNL or invalidating the conflicting session.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived while PLOGI was in flight. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (vha->hw->flags.edif_enabled) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		} else {
			if (NVME_TARGET(vha->hw, fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2117,
				    "%s %d %8phC post prli\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ea,
				    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->loop_id, fcport->d_id.b24);

				set_bit(fcport->loop_id, vha->hw->loop_id_map);
				spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
				fcport->chip_reset = vha->hw->base_qpair->chip_reset;
				fcport->logout_on_delete = 1;
				fcport->send_els_logo = 0;
				fcport->fw_login_state = DSC_LS_PRLI_COMP;
				spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

				qla24xx_post_gpdb_work(vha, fcport, 0);
			}
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Reserve the colliding loop id and pick a new one via GNL. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * qla83xx_nic_core_fw_load - participate in the ISP83xx inter-driver
 * coordination (IDC) protocol at load time: announce presence, negotiate
 * IDC major/minor versions, and run the IDC state handler.  All register
 * accesses happen under the IDC lock.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	/* Two bits per function, indexed by port number. */
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
 * qla2x00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
2309 * 2310 * Returns: 2311 * 0 = success 2312 */ 2313 int 2314 qla2x00_initialize_adapter(scsi_qla_host_t *vha) 2315 { 2316 int rval; 2317 struct qla_hw_data *ha = vha->hw; 2318 struct req_que *req = ha->req_q_map[0]; 2319 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2320 2321 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); 2322 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); 2323 2324 /* Clear adapter flags. */ 2325 vha->flags.online = 0; 2326 ha->flags.chip_reset_done = 0; 2327 vha->flags.reset_active = 0; 2328 ha->flags.pci_channel_io_perm_failure = 0; 2329 ha->flags.eeh_busy = 0; 2330 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); 2331 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 2332 atomic_set(&vha->loop_state, LOOP_DOWN); 2333 vha->device_flags = DFLG_NO_CABLE; 2334 vha->dpc_flags = 0; 2335 vha->flags.management_server_logged_in = 0; 2336 vha->marker_needed = 0; 2337 ha->isp_abort_cnt = 0; 2338 ha->beacon_blink_led = 0; 2339 2340 set_bit(0, ha->req_qid_map); 2341 set_bit(0, ha->rsp_qid_map); 2342 2343 ql_dbg(ql_dbg_init, vha, 0x0040, 2344 "Configuring PCI space...\n"); 2345 rval = ha->isp_ops->pci_config(vha); 2346 if (rval) { 2347 ql_log(ql_log_warn, vha, 0x0044, 2348 "Unable to configure PCI space.\n"); 2349 return (rval); 2350 } 2351 2352 ha->isp_ops->reset_chip(vha); 2353 2354 /* Check for secure flash support */ 2355 if (IS_QLA28XX(ha)) { 2356 if (rd_reg_word(®->mailbox12) & BIT_0) 2357 ha->flags.secure_adapter = 1; 2358 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", 2359 (ha->flags.secure_adapter) ? "Yes" : "No"); 2360 } 2361 2362 2363 rval = qla2xxx_get_flash_info(vha); 2364 if (rval) { 2365 ql_log(ql_log_fatal, vha, 0x004f, 2366 "Unable to validate FLASH data.\n"); 2367 return rval; 2368 } 2369 2370 if (IS_QLA8044(ha)) { 2371 qla8044_read_reset_template(vha); 2372 2373 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
2374 * If DONRESET_BIT0 is set, drivers should not set dev_state 2375 * to NEED_RESET. But if NEED_RESET is set, drivers should 2376 * should honor the reset. */ 2377 if (ql2xdontresethba == 1) 2378 qla8044_set_idc_dontreset(vha); 2379 } 2380 2381 ha->isp_ops->get_flash_version(vha, req->ring); 2382 ql_dbg(ql_dbg_init, vha, 0x0061, 2383 "Configure NVRAM parameters...\n"); 2384 2385 /* Let priority default to FCP, can be overridden by nvram_config */ 2386 ha->fc4_type_priority = FC4_PRIORITY_FCP; 2387 2388 ha->isp_ops->nvram_config(vha); 2389 2390 if (ha->fc4_type_priority != FC4_PRIORITY_FCP && 2391 ha->fc4_type_priority != FC4_PRIORITY_NVME) 2392 ha->fc4_type_priority = FC4_PRIORITY_FCP; 2393 2394 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", 2395 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); 2396 2397 if (ha->flags.disable_serdes) { 2398 /* Mask HBA via NVRAM settings? */ 2399 ql_log(ql_log_info, vha, 0x0077, 2400 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); 2401 return QLA_FUNCTION_FAILED; 2402 } 2403 2404 ql_dbg(ql_dbg_init, vha, 0x0078, 2405 "Verifying loaded RISC code...\n"); 2406 2407 /* If smartsan enabled then require fdmi and rdp enabled */ 2408 if (ql2xsmartsan) { 2409 ql2xfdmienable = 1; 2410 ql2xrdpenable = 1; 2411 } 2412 2413 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 2414 rval = ha->isp_ops->chip_diag(vha); 2415 if (rval) 2416 return (rval); 2417 rval = qla2x00_setup_chip(vha); 2418 if (rval) 2419 return (rval); 2420 } 2421 2422 if (IS_QLA84XX(ha)) { 2423 ha->cs84xx = qla84xx_get_chip(vha); 2424 if (!ha->cs84xx) { 2425 ql_log(ql_log_warn, vha, 0x00d0, 2426 "Unable to configure ISP84XX.\n"); 2427 return QLA_FUNCTION_FAILED; 2428 } 2429 } 2430 2431 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 2432 rval = qla2x00_init_rings(vha); 2433 2434 /* No point in continuing if firmware initialization failed. 
*/ 2435 if (rval != QLA_SUCCESS) 2436 return rval; 2437 2438 ha->flags.chip_reset_done = 1; 2439 2440 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 2441 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 2442 rval = qla84xx_init_chip(vha); 2443 if (rval != QLA_SUCCESS) { 2444 ql_log(ql_log_warn, vha, 0x00d4, 2445 "Unable to initialize ISP84XX.\n"); 2446 qla84xx_put_chip(vha); 2447 } 2448 } 2449 2450 /* Load the NIC Core f/w if we are the first protocol driver. */ 2451 if (IS_QLA8031(ha)) { 2452 rval = qla83xx_nic_core_fw_load(vha); 2453 if (rval) 2454 ql_log(ql_log_warn, vha, 0x0124, 2455 "Error in initializing NIC Core f/w.\n"); 2456 } 2457 2458 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 2459 qla24xx_read_fcp_prio_cfg(vha); 2460 2461 if (IS_P3P_TYPE(ha)) 2462 qla82xx_set_driver_version(vha, QLA2XXX_VERSION); 2463 else 2464 qla25xx_set_driver_version(vha, QLA2XXX_VERSION); 2465 2466 return (rval); 2467 } 2468 2469 /** 2470 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers. 2471 * @vha: HA context 2472 * 2473 * Returns 0 on success. 2474 */ 2475 int 2476 qla2100_pci_config(scsi_qla_host_t *vha) 2477 { 2478 uint16_t w; 2479 unsigned long flags; 2480 struct qla_hw_data *ha = vha->hw; 2481 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2482 2483 pci_set_master(ha->pdev); 2484 pci_try_set_mwi(ha->pdev); 2485 2486 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2487 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2488 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2489 2490 pci_disable_rom(ha->pdev); 2491 2492 /* Get PCI bus information. */ 2493 spin_lock_irqsave(&ha->hardware_lock, flags); 2494 ha->pci_attr = rd_reg_word(®->ctrl_status); 2495 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2496 2497 return QLA_SUCCESS; 2498 } 2499 2500 /** 2501 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers. 2502 * @vha: HA context 2503 * 2504 * Returns 0 on success. 
2505 */ 2506 int 2507 qla2300_pci_config(scsi_qla_host_t *vha) 2508 { 2509 uint16_t w; 2510 unsigned long flags = 0; 2511 uint32_t cnt; 2512 struct qla_hw_data *ha = vha->hw; 2513 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2514 2515 pci_set_master(ha->pdev); 2516 pci_try_set_mwi(ha->pdev); 2517 2518 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2519 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2520 2521 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2522 w &= ~PCI_COMMAND_INTX_DISABLE; 2523 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2524 2525 /* 2526 * If this is a 2300 card and not 2312, reset the 2527 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately, 2528 * the 2310 also reports itself as a 2300 so we need to get the 2529 * fb revision level -- a 6 indicates it really is a 2300 and 2530 * not a 2310. 2531 */ 2532 if (IS_QLA2300(ha)) { 2533 spin_lock_irqsave(&ha->hardware_lock, flags); 2534 2535 /* Pause RISC. */ 2536 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); 2537 for (cnt = 0; cnt < 30000; cnt++) { 2538 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) 2539 break; 2540 2541 udelay(10); 2542 } 2543 2544 /* Select FPM registers. */ 2545 wrt_reg_word(®->ctrl_status, 0x20); 2546 rd_reg_word(®->ctrl_status); 2547 2548 /* Get the fb rev level */ 2549 ha->fb_rev = RD_FB_CMD_REG(ha, reg); 2550 2551 if (ha->fb_rev == FPM_2300) 2552 pci_clear_mwi(ha->pdev); 2553 2554 /* Deselect FPM registers. */ 2555 wrt_reg_word(®->ctrl_status, 0x0); 2556 rd_reg_word(®->ctrl_status); 2557 2558 /* Release RISC module. */ 2559 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2560 for (cnt = 0; cnt < 30000; cnt++) { 2561 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0) 2562 break; 2563 2564 udelay(10); 2565 } 2566 2567 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2568 } 2569 2570 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 2571 2572 pci_disable_rom(ha->pdev); 2573 2574 /* Get PCI bus information. 
*/ 2575 spin_lock_irqsave(&ha->hardware_lock, flags); 2576 ha->pci_attr = rd_reg_word(®->ctrl_status); 2577 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2578 2579 return QLA_SUCCESS; 2580 } 2581 2582 /** 2583 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers. 2584 * @vha: HA context 2585 * 2586 * Returns 0 on success. 2587 */ 2588 int 2589 qla24xx_pci_config(scsi_qla_host_t *vha) 2590 { 2591 uint16_t w; 2592 unsigned long flags = 0; 2593 struct qla_hw_data *ha = vha->hw; 2594 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2595 2596 pci_set_master(ha->pdev); 2597 pci_try_set_mwi(ha->pdev); 2598 2599 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 2600 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 2601 w &= ~PCI_COMMAND_INTX_DISABLE; 2602 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 2603 2604 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 2605 2606 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ 2607 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) 2608 pcix_set_mmrbc(ha->pdev, 2048); 2609 2610 /* PCIe -- adjust Maximum Read Request Size (2048). */ 2611 if (pci_is_pcie(ha->pdev)) 2612 pcie_set_readrq(ha->pdev, 4096); 2613 2614 pci_disable_rom(ha->pdev); 2615 2616 ha->chip_revision = ha->pdev->revision; 2617 2618 /* Get PCI bus information. */ 2619 spin_lock_irqsave(&ha->hardware_lock, flags); 2620 ha->pci_attr = rd_reg_dword(®->ctrl_status); 2621 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2622 2623 return QLA_SUCCESS; 2624 } 2625 2626 /** 2627 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers. 2628 * @vha: HA context 2629 * 2630 * Returns 0 on success. 
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting; keep INTx enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int  rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	/*
	 * Non-zero here means the resident RISC code cannot be used and the
	 * caller (qla2x00_initialize_adapter) must load firmware itself.
	 */
	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Returns 0 on success.
2698 */ 2699 int 2700 qla2x00_reset_chip(scsi_qla_host_t *vha) 2701 { 2702 unsigned long flags = 0; 2703 struct qla_hw_data *ha = vha->hw; 2704 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2705 uint32_t cnt; 2706 uint16_t cmd; 2707 int rval = QLA_FUNCTION_FAILED; 2708 2709 if (unlikely(pci_channel_offline(ha->pdev))) 2710 return rval; 2711 2712 ha->isp_ops->disable_intrs(ha); 2713 2714 spin_lock_irqsave(&ha->hardware_lock, flags); 2715 2716 /* Turn off master enable */ 2717 cmd = 0; 2718 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); 2719 cmd &= ~PCI_COMMAND_MASTER; 2720 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 2721 2722 if (!IS_QLA2100(ha)) { 2723 /* Pause RISC. */ 2724 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); 2725 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { 2726 for (cnt = 0; cnt < 30000; cnt++) { 2727 if ((rd_reg_word(®->hccr) & 2728 HCCR_RISC_PAUSE) != 0) 2729 break; 2730 udelay(100); 2731 } 2732 } else { 2733 rd_reg_word(®->hccr); /* PCI Posting. */ 2734 udelay(10); 2735 } 2736 2737 /* Select FPM registers. */ 2738 wrt_reg_word(®->ctrl_status, 0x20); 2739 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2740 2741 /* FPM Soft Reset. */ 2742 wrt_reg_word(®->fpm_diag_config, 0x100); 2743 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ 2744 2745 /* Toggle Fpm Reset. */ 2746 if (!IS_QLA2200(ha)) { 2747 wrt_reg_word(®->fpm_diag_config, 0x0); 2748 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ 2749 } 2750 2751 /* Select frame buffer registers. */ 2752 wrt_reg_word(®->ctrl_status, 0x10); 2753 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2754 2755 /* Reset frame buffer FIFOs. */ 2756 if (IS_QLA2200(ha)) { 2757 WRT_FB_CMD_REG(ha, reg, 0xa000); 2758 RD_FB_CMD_REG(ha, reg); /* PCI Posting. 
*/ 2759 } else { 2760 WRT_FB_CMD_REG(ha, reg, 0x00fc); 2761 2762 /* Read back fb_cmd until zero or 3 seconds max */ 2763 for (cnt = 0; cnt < 3000; cnt++) { 2764 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) 2765 break; 2766 udelay(100); 2767 } 2768 } 2769 2770 /* Select RISC module registers. */ 2771 wrt_reg_word(®->ctrl_status, 0); 2772 rd_reg_word(®->ctrl_status); /* PCI Posting. */ 2773 2774 /* Reset RISC processor. */ 2775 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 2776 rd_reg_word(®->hccr); /* PCI Posting. */ 2777 2778 /* Release RISC processor. */ 2779 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2780 rd_reg_word(®->hccr); /* PCI Posting. */ 2781 } 2782 2783 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 2784 wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT); 2785 2786 /* Reset ISP chip. */ 2787 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); 2788 2789 /* Wait for RISC to recover from reset. */ 2790 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 2791 /* 2792 * It is necessary to for a delay here since the card doesn't 2793 * respond to PCI reads during a reset. On some architectures 2794 * this will result in an MCA. 2795 */ 2796 udelay(20); 2797 for (cnt = 30000; cnt; cnt--) { 2798 if ((rd_reg_word(®->ctrl_status) & 2799 CSR_ISP_SOFT_RESET) == 0) 2800 break; 2801 udelay(100); 2802 } 2803 } else 2804 udelay(10); 2805 2806 /* Reset RISC processor. */ 2807 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 2808 2809 wrt_reg_word(®->semaphore, 0); 2810 2811 /* Release RISC processor. */ 2812 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 2813 rd_reg_word(®->hccr); /* PCI Posting. */ 2814 2815 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 2816 for (cnt = 0; cnt < 30000; cnt++) { 2817 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) 2818 break; 2819 2820 udelay(100); 2821 } 2822 } else 2823 udelay(100); 2824 2825 /* Turn on master enable */ 2826 cmd |= PCI_COMMAND_MASTER; 2827 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 2828 2829 /* Disable RISC pause on FPM parity error. 
*/ 2830 if (!IS_QLA2100(ha)) { 2831 wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE); 2832 rd_reg_word(®->hccr); /* PCI Posting. */ 2833 } 2834 2835 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2836 2837 return QLA_SUCCESS; 2838 } 2839 2840 /** 2841 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. 2842 * @vha: HA context 2843 * 2844 * Returns 0 on success. 2845 */ 2846 static int 2847 qla81xx_reset_mpi(scsi_qla_host_t *vha) 2848 { 2849 uint16_t mb[4] = {0x1010, 0, 1, 0}; 2850 2851 if (!IS_QLA81XX(vha->hw)) 2852 return QLA_SUCCESS; 2853 2854 return qla81xx_write_mpi_register(vha, mb); 2855 } 2856 2857 static int 2858 qla_chk_risc_recovery(scsi_qla_host_t *vha) 2859 { 2860 struct qla_hw_data *ha = vha->hw; 2861 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2862 __le16 __iomem *mbptr = ®->mailbox0; 2863 int i; 2864 u16 mb[32]; 2865 int rc = QLA_SUCCESS; 2866 2867 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2868 return rc; 2869 2870 /* this check is only valid after RISC reset */ 2871 mb[0] = rd_reg_word(mbptr); 2872 mbptr++; 2873 if (mb[0] == 0xf) { 2874 rc = QLA_FUNCTION_FAILED; 2875 2876 for (i = 1; i < 32; i++) { 2877 mb[i] = rd_reg_word(mbptr); 2878 mbptr++; 2879 } 2880 2881 ql_log(ql_log_warn, vha, 0x1015, 2882 "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2883 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]); 2884 ql_log(ql_log_warn, vha, 0x1015, 2885 "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2886 mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14], 2887 mb[15]); 2888 ql_log(ql_log_warn, vha, 0x1015, 2889 "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2890 mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22], 2891 mb[23]); 2892 ql_log(ql_log_warn, vha, 0x1015, 2893 "RISC reset failed. 
mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", 2894 mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30], 2895 mb[31]); 2896 } 2897 return rc; 2898 } 2899 2900 /** 2901 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 2902 * @vha: HA context 2903 * 2904 * Returns 0 on success. 2905 */ 2906 static inline int 2907 qla24xx_reset_risc(scsi_qla_host_t *vha) 2908 { 2909 unsigned long flags = 0; 2910 struct qla_hw_data *ha = vha->hw; 2911 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2912 uint32_t cnt; 2913 uint16_t wd; 2914 static int abts_cnt; /* ISP abort retry counts */ 2915 int rval = QLA_SUCCESS; 2916 int print = 1; 2917 2918 spin_lock_irqsave(&ha->hardware_lock, flags); 2919 2920 /* Reset RISC. */ 2921 wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 2922 for (cnt = 0; cnt < 30000; cnt++) { 2923 if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) 2924 break; 2925 2926 udelay(10); 2927 } 2928 2929 if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) 2930 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); 2931 2932 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, 2933 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", 2934 rd_reg_dword(®->hccr), 2935 rd_reg_dword(®->ctrl_status), 2936 (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)); 2937 2938 wrt_reg_dword(®->ctrl_status, 2939 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 2940 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 2941 2942 udelay(100); 2943 2944 /* Wait for firmware to complete NVRAM accesses. 
*/ 2945 rd_reg_word(®->mailbox0); 2946 for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && 2947 rval == QLA_SUCCESS; cnt--) { 2948 barrier(); 2949 if (cnt) 2950 udelay(5); 2951 else 2952 rval = QLA_FUNCTION_TIMEOUT; 2953 } 2954 2955 if (rval == QLA_SUCCESS) 2956 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); 2957 2958 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, 2959 "HCCR: 0x%x, MailBox0 Status 0x%x\n", 2960 rd_reg_dword(®->hccr), 2961 rd_reg_word(®->mailbox0)); 2962 2963 /* Wait for soft-reset to complete. */ 2964 rd_reg_dword(®->ctrl_status); 2965 for (cnt = 0; cnt < 60; cnt++) { 2966 barrier(); 2967 if ((rd_reg_dword(®->ctrl_status) & 2968 CSRX_ISP_SOFT_RESET) == 0) 2969 break; 2970 2971 udelay(5); 2972 } 2973 if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) 2974 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); 2975 2976 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, 2977 "HCCR: 0x%x, Soft Reset status: 0x%x\n", 2978 rd_reg_dword(®->hccr), 2979 rd_reg_dword(®->ctrl_status)); 2980 2981 /* If required, do an MPI FW reset now */ 2982 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { 2983 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { 2984 if (++abts_cnt < 5) { 2985 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2986 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); 2987 } else { 2988 /* 2989 * We exhausted the ISP abort retries. We have to 2990 * set the board offline. 
2991 */ 2992 abts_cnt = 0; 2993 vha->flags.online = 0; 2994 } 2995 } 2996 } 2997 2998 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); 2999 rd_reg_dword(®->hccr); 3000 3001 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); 3002 rd_reg_dword(®->hccr); 3003 3004 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); 3005 mdelay(10); 3006 rd_reg_dword(®->hccr); 3007 3008 wd = rd_reg_word(®->mailbox0); 3009 for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) { 3010 barrier(); 3011 if (cnt) { 3012 mdelay(1); 3013 if (print && qla_chk_risc_recovery(vha)) 3014 print = 0; 3015 3016 wd = rd_reg_word(®->mailbox0); 3017 } else { 3018 rval = QLA_FUNCTION_TIMEOUT; 3019 3020 ql_log(ql_log_warn, vha, 0x015e, 3021 "RISC reset timeout\n"); 3022 } 3023 } 3024 3025 if (rval == QLA_SUCCESS) 3026 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); 3027 3028 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, 3029 "Host Risc 0x%x, mailbox0 0x%x\n", 3030 rd_reg_dword(®->hccr), 3031 rd_reg_word(®->mailbox0)); 3032 3033 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3034 3035 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f, 3036 "Driver in %s mode\n", 3037 IS_NOPOLLING_TYPE(ha) ? 
"Interrupt" : "Polling"); 3038 3039 if (IS_NOPOLLING_TYPE(ha)) 3040 ha->isp_ops->enable_intrs(ha); 3041 3042 return rval; 3043 } 3044 3045 static void 3046 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) 3047 { 3048 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; 3049 3050 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); 3051 *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); 3052 } 3053 3054 static void 3055 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) 3056 { 3057 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; 3058 3059 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); 3060 wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); 3061 } 3062 3063 static void 3064 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) 3065 { 3066 uint32_t wd32 = 0; 3067 uint delta_msec = 100; 3068 uint elapsed_msec = 0; 3069 uint timeout_msec; 3070 ulong n; 3071 3072 if (vha->hw->pdev->subsystem_device != 0x0175 && 3073 vha->hw->pdev->subsystem_device != 0x0240) 3074 return; 3075 3076 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); 3077 udelay(100); 3078 3079 attempt: 3080 timeout_msec = TIMEOUT_SEMAPHORE; 3081 n = timeout_msec / delta_msec; 3082 while (n--) { 3083 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET); 3084 qla25xx_read_risc_sema_reg(vha, &wd32); 3085 if (wd32 & RISC_SEMAPHORE) 3086 break; 3087 msleep(delta_msec); 3088 elapsed_msec += delta_msec; 3089 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) 3090 goto force; 3091 } 3092 3093 if (!(wd32 & RISC_SEMAPHORE)) 3094 goto force; 3095 3096 if (!(wd32 & RISC_SEMAPHORE_FORCE)) 3097 goto acquired; 3098 3099 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR); 3100 timeout_msec = TIMEOUT_SEMAPHORE_FORCE; 3101 n = timeout_msec / delta_msec; 3102 while (n--) { 3103 qla25xx_read_risc_sema_reg(vha, &wd32); 3104 if (!(wd32 & RISC_SEMAPHORE_FORCE)) 3105 break; 3106 msleep(delta_msec); 3107 
elapsed_msec += delta_msec; 3108 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) 3109 goto force; 3110 } 3111 3112 if (wd32 & RISC_SEMAPHORE_FORCE) 3113 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); 3114 3115 goto attempt; 3116 3117 force: 3118 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); 3119 3120 acquired: 3121 return; 3122 } 3123 3124 /** 3125 * qla24xx_reset_chip() - Reset ISP24xx chip. 3126 * @vha: HA context 3127 * 3128 * Returns 0 on success. 3129 */ 3130 int 3131 qla24xx_reset_chip(scsi_qla_host_t *vha) 3132 { 3133 struct qla_hw_data *ha = vha->hw; 3134 int rval = QLA_FUNCTION_FAILED; 3135 3136 if (pci_channel_offline(ha->pdev) && 3137 ha->flags.pci_channel_io_perm_failure) { 3138 return rval; 3139 } 3140 3141 ha->isp_ops->disable_intrs(ha); 3142 3143 qla25xx_manipulate_risc_semaphore(vha); 3144 3145 /* Perform RISC reset. */ 3146 rval = qla24xx_reset_risc(vha); 3147 3148 return rval; 3149 } 3150 3151 /** 3152 * qla2x00_chip_diag() - Test chip for proper operation. 3153 * @vha: HA context 3154 * 3155 * Returns 0 on success. 3156 */ 3157 int 3158 qla2x00_chip_diag(scsi_qla_host_t *vha) 3159 { 3160 int rval; 3161 struct qla_hw_data *ha = vha->hw; 3162 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3163 unsigned long flags = 0; 3164 uint16_t data; 3165 uint32_t cnt; 3166 uint16_t mb[5]; 3167 struct req_que *req = ha->req_q_map[0]; 3168 3169 /* Assume a failed state */ 3170 rval = QLA_FUNCTION_FAILED; 3171 3172 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n", 3173 ®->flash_address); 3174 3175 spin_lock_irqsave(&ha->hardware_lock, flags); 3176 3177 /* Reset ISP chip. */ 3178 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); 3179 3180 /* 3181 * We need to have a delay here since the card will not respond while 3182 * in reset causing an MCA on some architectures. 
3183 */ 3184 udelay(20); 3185 data = qla2x00_debounce_register(®->ctrl_status); 3186 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { 3187 udelay(5); 3188 data = rd_reg_word(®->ctrl_status); 3189 barrier(); 3190 } 3191 3192 if (!cnt) 3193 goto chip_diag_failed; 3194 3195 ql_dbg(ql_dbg_init, vha, 0x007c, 3196 "Reset register cleared by chip reset.\n"); 3197 3198 /* Reset RISC processor. */ 3199 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 3200 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 3201 3202 /* Workaround for QLA2312 PCI parity error */ 3203 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 3204 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); 3205 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { 3206 udelay(5); 3207 data = RD_MAILBOX_REG(ha, reg, 0); 3208 barrier(); 3209 } 3210 } else 3211 udelay(10); 3212 3213 if (!cnt) 3214 goto chip_diag_failed; 3215 3216 /* Check product ID of chip */ 3217 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); 3218 3219 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 3220 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 3221 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 3222 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 3223 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 3224 mb[3] != PROD_ID_3) { 3225 ql_log(ql_log_warn, vha, 0x0062, 3226 "Wrong product ID = 0x%x,0x%x,0x%x.\n", 3227 mb[1], mb[2], mb[3]); 3228 3229 goto chip_diag_failed; 3230 } 3231 ha->product_id[0] = mb[1]; 3232 ha->product_id[1] = mb[2]; 3233 ha->product_id[2] = mb[3]; 3234 ha->product_id[3] = mb[4]; 3235 3236 /* Adjust fw RISC transfer size */ 3237 if (req->length > 1024) 3238 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 3239 else 3240 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 3241 req->length; 3242 3243 if (IS_QLA2200(ha) && 3244 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 3245 /* Limit firmware transfer size with a 2200A */ 3246 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A 
Chip.\n"); 3247 3248 ha->device_type |= DT_ISP2200A; 3249 ha->fw_transfer_size = 128; 3250 } 3251 3252 /* Wrap Incoming Mailboxes Test. */ 3253 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3254 3255 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); 3256 rval = qla2x00_mbx_reg_test(vha); 3257 if (rval) 3258 ql_log(ql_log_warn, vha, 0x0080, 3259 "Failed mailbox send register test.\n"); 3260 else 3261 /* Flag a successful rval */ 3262 rval = QLA_SUCCESS; 3263 spin_lock_irqsave(&ha->hardware_lock, flags); 3264 3265 chip_diag_failed: 3266 if (rval) 3267 ql_log(ql_log_info, vha, 0x0081, 3268 "Chip diagnostics **** FAILED ****.\n"); 3269 3270 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3271 3272 return (rval); 3273 } 3274 3275 /** 3276 * qla24xx_chip_diag() - Test ISP24xx for proper operation. 3277 * @vha: HA context 3278 * 3279 * Returns 0 on success. 3280 */ 3281 int 3282 qla24xx_chip_diag(scsi_qla_host_t *vha) 3283 { 3284 int rval; 3285 struct qla_hw_data *ha = vha->hw; 3286 struct req_que *req = ha->req_q_map[0]; 3287 3288 if (IS_P3P_TYPE(ha)) 3289 return QLA_SUCCESS; 3290 3291 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 3292 3293 rval = qla2x00_mbx_reg_test(vha); 3294 if (rval) { 3295 ql_log(ql_log_warn, vha, 0x0082, 3296 "Failed mailbox send register test.\n"); 3297 } else { 3298 /* Flag a successful rval */ 3299 rval = QLA_SUCCESS; 3300 } 3301 3302 return rval; 3303 } 3304 3305 static void 3306 qla2x00_init_fce_trace(scsi_qla_host_t *vha) 3307 { 3308 int rval; 3309 dma_addr_t tc_dma; 3310 void *tc; 3311 struct qla_hw_data *ha = vha->hw; 3312 3313 if (!IS_FWI2_CAPABLE(ha)) 3314 return; 3315 3316 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3317 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3318 return; 3319 3320 if (ha->fce) { 3321 ql_dbg(ql_dbg_init, vha, 0x00bd, 3322 "%s: FCE Mem is already allocated.\n", 3323 __func__); 3324 return; 3325 } 3326 3327 /* Allocate memory for Fibre Channel Event Buffer. 
*/ 3328 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3329 GFP_KERNEL); 3330 if (!tc) { 3331 ql_log(ql_log_warn, vha, 0x00be, 3332 "Unable to allocate (%d KB) for FCE.\n", 3333 FCE_SIZE / 1024); 3334 return; 3335 } 3336 3337 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 3338 ha->fce_mb, &ha->fce_bufs); 3339 if (rval) { 3340 ql_log(ql_log_warn, vha, 0x00bf, 3341 "Unable to initialize FCE (%d).\n", rval); 3342 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); 3343 return; 3344 } 3345 3346 ql_dbg(ql_dbg_init, vha, 0x00c0, 3347 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); 3348 3349 ha->flags.fce_enabled = 1; 3350 ha->fce_dma = tc_dma; 3351 ha->fce = tc; 3352 } 3353 3354 static void 3355 qla2x00_init_eft_trace(scsi_qla_host_t *vha) 3356 { 3357 int rval; 3358 dma_addr_t tc_dma; 3359 void *tc; 3360 struct qla_hw_data *ha = vha->hw; 3361 3362 if (!IS_FWI2_CAPABLE(ha)) 3363 return; 3364 3365 if (ha->eft) { 3366 ql_dbg(ql_dbg_init, vha, 0x00bd, 3367 "%s: EFT Mem is already allocated.\n", 3368 __func__); 3369 return; 3370 } 3371 3372 /* Allocate memory for Extended Trace Buffer. 
	 */
	/* DMA-coherent buffer handed to firmware for Extended FW Tracing. */
	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
	    GFP_KERNEL);
	if (!tc) {
		ql_log(ql_log_warn, vha, 0x00c1,
		    "Unable to allocate (%d KB) for EFT.\n",
		    EFT_SIZE / 1024);
		return;
	}

	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x00c2,
		    "Unable to initialize EFT (%d).\n", rval);
		/* Firmware rejected the buffer -- give it back. */
		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
		return;
	}

	ql_dbg(ql_dbg_init, vha, 0x00c3,
	    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

	/* Publish only after firmware has accepted the trace buffer. */
	ha->eft_dma = tc_dma;
	ha->eft = tc;
}

/*
 * qla2x00_alloc_offload_mem() - Set up the optional FCE and EFT firmware
 * trace buffers.  Both helpers are best-effort and log their own failures,
 * so there is nothing to check here.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}

/*
 * qla2x00_alloc_fw_dump() - Compute the firmware-dump size for this chip
 * generation and (re)allocate ha->fw_dump to hold it.
 *
 * The size is the sum of a chip-specific fixed region, external firmware
 * memory, request/response queue shadows and optional chains (MQ, ATIO,
 * FCE, EFT, exchange-offload).  ISP27xx/28xx instead derive the size from
 * their dump templates.  An existing dump that has already been captured
 * is preserved across reallocation.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	/* Fixed-region size depends on the ISP family. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
			    !IS_QLA28XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/* Trace buffers only contribute if they were allocated. */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* Template-driven dump sizing for ISP27xx/28xx. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		/* Chains (MQ/FCE/offload) start here within the dump. */
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;
		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exlogin_size;
	}

	/* Only (re)allocate when there is no buffer or it is too small. */
	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {

		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			/* optrom_mutex serializes against dump readers. */
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already-captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len = dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/*
					 * NOTE(review): spare MPI dump area is
					 * assumed to begin fwdt[1].dump_size
					 * bytes into the buffer -- confirm
					 * against the 27xx dump layout.
					 */
					ha->mpi_fw_dump = (char *)fw_dump +
					    ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Legacy dump header ("QLGC", big-endian). */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
				    htonl(offsetof
					(struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}

/*
 * qla81xx_mpi_sync() - Align the MPS bits of RAM word 0x7a15 with the
 * value read from PCI config space (offset 0x54); ISP81xx only.  The
 * update is bracketed by a firmware semaphore at RAM word 0x7c00.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore before touching 0x7a15. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the config-space MPS bits into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* NOTE: rval is overwritten here, so a failed write above is
	 * reported only via the log message. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}

/*
 * qla2x00_alloc_outstanding_cmds() - Allocate req->outstanding_cmds sized
 * from the firmware-reported resource counts (falls back to a minimal
 * array so initialization can proceed).  Idempotent.
 */
int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
{
	/* Don't try to reallocate the array */
	if (req->outstanding_cmds)
		return QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(ha))
		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
	else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 3619 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 3620 else 3621 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 3622 } 3623 3624 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3625 sizeof(srb_t *), 3626 GFP_KERNEL); 3627 3628 if (!req->outstanding_cmds) { 3629 /* 3630 * Try to allocate a minimal size just so we can get through 3631 * initialization. 3632 */ 3633 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 3634 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3635 sizeof(srb_t *), 3636 GFP_KERNEL); 3637 3638 if (!req->outstanding_cmds) { 3639 ql_log(ql_log_fatal, NULL, 0x0126, 3640 "Failed to allocate memory for " 3641 "outstanding_cmds for req_que %p.\n", req); 3642 req->num_outstanding_cmds = 0; 3643 return QLA_FUNCTION_FAILED; 3644 } 3645 } 3646 3647 return QLA_SUCCESS; 3648 } 3649 3650 #define PRINT_FIELD(_field, _flag, _str) { \ 3651 if (a0->_field & _flag) {\ 3652 if (p) {\ 3653 strcat(ptr, "|");\ 3654 ptr++;\ 3655 leftover--;\ 3656 } \ 3657 len = snprintf(ptr, leftover, "%s", _str); \ 3658 p = 1;\ 3659 leftover -= len;\ 3660 ptr += len; \ 3661 } \ 3662 } 3663 3664 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 3665 { 3666 #define STR_LEN 64 3667 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 3668 u8 str[STR_LEN], *ptr, p; 3669 int leftover, len; 3670 3671 memset(str, 0, STR_LEN); 3672 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 3673 ql_dbg(ql_dbg_init, vha, 0x015a, 3674 "SFP MFG Name: %s\n", str); 3675 3676 memset(str, 0, STR_LEN); 3677 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 3678 ql_dbg(ql_dbg_init, vha, 0x015c, 3679 "SFP Part Name: %s\n", str); 3680 3681 /* media */ 3682 memset(str, 0, STR_LEN); 3683 ptr = str; 3684 leftover = STR_LEN; 3685 p = len = 0; 3686 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 3687 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 3688 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 
3689 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 3690 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 3691 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 3692 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 3693 ql_dbg(ql_dbg_init, vha, 0x0160, 3694 "SFP Media: %s\n", str); 3695 3696 /* link length */ 3697 memset(str, 0, STR_LEN); 3698 ptr = str; 3699 leftover = STR_LEN; 3700 p = len = 0; 3701 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 3702 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 3703 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 3704 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 3705 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 3706 ql_dbg(ql_dbg_init, vha, 0x0196, 3707 "SFP Link Length: %s\n", str); 3708 3709 memset(str, 0, STR_LEN); 3710 ptr = str; 3711 leftover = STR_LEN; 3712 p = len = 0; 3713 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 3714 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 3715 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 3716 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 3717 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 3718 ql_dbg(ql_dbg_init, vha, 0x016e, 3719 "SFP FC Link Tech: %s\n", str); 3720 3721 if (a0->length_km) 3722 ql_dbg(ql_dbg_init, vha, 0x016f, 3723 "SFP Distant: %d km\n", a0->length_km); 3724 if (a0->length_100m) 3725 ql_dbg(ql_dbg_init, vha, 0x0170, 3726 "SFP Distant: %d m\n", a0->length_100m*100); 3727 if (a0->length_50um_10m) 3728 ql_dbg(ql_dbg_init, vha, 0x0189, 3729 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 3730 if (a0->length_62um_10m) 3731 ql_dbg(ql_dbg_init, vha, 0x018a, 3732 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); 3733 if (a0->length_om4_10m) 3734 ql_dbg(ql_dbg_init, vha, 0x0194, 3735 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); 3736 if (a0->length_om3_10m) 3737 ql_dbg(ql_dbg_init, vha, 0x0195, 3738 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); 3739 } 3740 3741 3742 /** 3743 * qla24xx_detect_sfp() 
 *
 * @vha: adapter state pointer.
 *
 * @return
 *	0 -- Configure firmware to use short-range settings -- normal
 *	     buffer-to-buffer credits.
 *
 *	1 -- Configure firmware to use long-range settings -- extra
 *	     buffer-to-buffer credits should be allocated with
 *	     ha->lr_distance containing distance settings from NVRAM or SFP
 *	     (if supported).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN 2
	static const char * const types[] = { "Short", "Long" };
	/* Indexed by ha->lr_distance (LR_DISTANCE_10K/5K/UNKNOWN). */
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		    & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	/* SFP data wins over the NVRAM seed from here on. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	    lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}

/*
 * qla_init_iocb_limit() - Seed per-queue-pair firmware IOCB accounting:
 * total from the firmware-reported count, a global percentage cap, and a
 * per-queue-pair share of that cap.  Usage counters are reset to zero.
 */
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	u16 i, num_qps;
	u32 limit;
	struct qla_hw_data *ha = vha->hw;

	/* +1 accounts for the base queue pair. */
	num_qps = ha->num_qpairs + 1;
	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
	ha->base_qpair->fwres.iocbs_limit = limit;
	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
	ha->base_qpair->fwres.iocbs_used = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->fwres.iocbs_total =
				ha->orig_fw_iocb_count;
			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
				limit / num_qps;
			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
		}
	}
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	int done_once = 0;	/* limits the BPM firmware restart to once */

	if (IS_P3P_TYPE(ha)) {
		/* P3P parts load/stop firmware then skip straight to NPIV. */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.
		 */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);	/* flush the PCI write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/* Enable BPM support? */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					/*
					 * Long-range SFP detected: restart
					 * firmware once with LR settings.
					 */
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Keep vport count a multiple of
					 * MIN_MULTI_ID_FABRIC minus one. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f ||
		    ha->flags.edif_enabled)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);	/* flush the PCI write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Flash-access (FAC) capability detection. */
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* Missing FAC is non-fatal on these parts. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Stamp every entry so stale firmware completions are ignored. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/2200 have no serial-link or feature options to tweak. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings: unpack swing/emphasis/sensitivity bitfields
		 * from NVRAM serial-link option bytes into fw_options[10]. */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;	/* 0 is invalid; use default */
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings: same layout, different source bits, into
		 * fw_options[11]. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;	/* 0 is invalid; use default */
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * qla24xx_update_fw_options() - FWI2 (24xx and later) variant: adjust
 * firmware option bits (ABTS holding, FLOGI retry, ATIO routing, exchange
 * tracking, PUREX, transceiver events) and push them plus serdes
 * parameters to the firmware.  No-op on P3P parts.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only issue the mailbox command if something was changed. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

/*
 * qla2x00_config_rings() - Legacy ISP2x00 ring setup: program queue
 * lengths and DMA addresses into the init control block and zero the
 * request/response in/out pointer registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

/*
 * qla24xx_config_rings() - FWI2 ring setup: program request/response and
 * ATIO queue parameters into the 24xx init control block, select MSI-X /
 * handshake / shadow-register options, and zero the queue pointer
 * registers for either the MQ or legacy register layout.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block.
	 */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/* Multi-queue register layout. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		/* Legacy 24xx register layout. */
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; start handles at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware.
		 */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-Port and FA-WWPN support are advertised by firmware in
		 * firmware_options_1 of the init control block. */
		ha->flags.dport_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	/* Poll firmware state every 500ms until READY or a deadline hits. */
	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh timeouts from firmware now that
				 * the link is up. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *           Setup adapter context.
 *
 * Input:
 *      ha = adapter state pointer.
 *
 * Returns:
 *      0 = success
 *
 * Context:
 *      Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses.
	 */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* 0x7 / 0x1b are firmware-specific completion sub-codes for
		 * a loop still in transition -- NOTE(review): confirm against
		 * the firmware interface spec. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo 4: firmware could not determine topology yet; fail so the
	 * caller retries configuration. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* In N2N (topo 2) with the bigger-WWPN side, the host map is updated
	 * later by the login path -- NOTE(review): assumption, confirm. */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

/**
 * qla2x00_set_model_info() - Derive model number/description strings.
 * @vha: HA context
 * @model: raw model bytes (may be all-zero)
 * @len: length of @model (clamped to 16 bytes)
 * @def: default model-number string used when @model is empty and the
 *	 PCI-subsystem lookup table does not apply
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	/* Clamp to the 16-byte compare window. */
	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		memcpy(ha->model_number, model, len);
		/* Trim trailing spaces/NULs in place. */
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		/* Table entries are (number, description) pairs keyed by the
		 * low byte of the PCI subsystem device id. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strlcpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	/* FWI2 parts carry the description in VPD tag 0x82; prefer it. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum.
	 * A valid image sums (mod 256) to zero over the whole region. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder WWPN bytes -- deliberately invalid defaults. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		/* On sparc, real WWNs may still come from OF properties. */
		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval flags that defaults were substituted. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 * cnt is the byte span of the first half, computed from ICB layout.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half.
	 * dptr2 continues from where the first-half copy stopped.
	 */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds.
	 * The module parameter always overrides the NVRAM value. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count.
	 * Login retries track the (possibly overridden) port-down retries
	 * whenever the latter is larger; module param wins overall. */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			/* First pass: capture mode/timer from NVRAM-derived
			 * options; timer of 0 defaults to 2 (x100 us). */
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			/* Any enabled mode is normalized to mode 6. */
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/* Detach the FC transport rport (preferring a deferred drport) from an
 * fcport; host_lock guards the drport/rport handoff. */
static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport : fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	if (rport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
		    "%s %8phN. rport %p roles %x\n",
		    __func__, fcport->port_name, rport,
		    rport->roles);

		/* Delete outside the lock; may sleep. */
		fc_remote_port_delete(rport);
	}
}

/* Atomically move an fcport to @state, logging real transitions. */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	}
}

/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
5102 * @vha: HA context 5103 * @flags: allocation flags 5104 * 5105 * Returns a pointer to the allocated fcport, or NULL, if none available. 5106 */ 5107 fc_port_t * 5108 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 5109 { 5110 fc_port_t *fcport; 5111 5112 fcport = kzalloc(sizeof(fc_port_t), flags); 5113 if (!fcport) 5114 return NULL; 5115 5116 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 5117 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 5118 flags); 5119 if (!fcport->ct_desc.ct_sns) { 5120 ql_log(ql_log_warn, vha, 0xd049, 5121 "Failed to allocate ct_sns request.\n"); 5122 kfree(fcport); 5123 return NULL; 5124 } 5125 5126 /* Setup fcport template structure. */ 5127 fcport->vha = vha; 5128 fcport->port_type = FCT_UNKNOWN; 5129 fcport->loop_id = FC_NO_LOOP_ID; 5130 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 5131 fcport->supported_classes = FC_COS_UNSPECIFIED; 5132 fcport->fp_speed = PORT_SPEED_UNKNOWN; 5133 5134 fcport->disc_state = DSC_DELETED; 5135 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 5136 fcport->deleted = QLA_SESS_DELETED; 5137 fcport->login_retry = vha->hw->login_retry_count; 5138 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5139 fcport->logout_on_delete = 1; 5140 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 5141 fcport->tgt_short_link_down_cnt = 0; 5142 fcport->dev_loss_tmo = 0; 5143 5144 if (!fcport->ct_desc.ct_sns) { 5145 ql_log(ql_log_warn, vha, 0xd049, 5146 "Failed to allocate ct_sns request.\n"); 5147 kfree(fcport); 5148 return NULL; 5149 } 5150 5151 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 5152 INIT_WORK(&fcport->free_work, qlt_free_session_done); 5153 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); 5154 INIT_LIST_HEAD(&fcport->gnl_entry); 5155 INIT_LIST_HEAD(&fcport->list); 5156 5157 INIT_LIST_HEAD(&fcport->sess_cmd_list); 5158 spin_lock_init(&fcport->sess_cmd_lock); 5159 5160 spin_lock_init(&fcport->edif.sa_list_lock); 5161 
INIT_LIST_HEAD(&fcport->edif.tx_sa_list); 5162 INIT_LIST_HEAD(&fcport->edif.rx_sa_list); 5163 5164 if (vha->e_dbell.db_flags == EDB_ACTIVE) 5165 fcport->edif.app_started = 1; 5166 5167 spin_lock_init(&fcport->edif.indx_list_lock); 5168 INIT_LIST_HEAD(&fcport->edif.edif_indx_list); 5169 5170 return fcport; 5171 } 5172 5173 void 5174 qla2x00_free_fcport(fc_port_t *fcport) 5175 { 5176 if (fcport->ct_desc.ct_sns) { 5177 dma_free_coherent(&fcport->vha->hw->pdev->dev, 5178 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 5179 fcport->ct_desc.ct_sns_dma); 5180 5181 fcport->ct_desc.ct_sns = NULL; 5182 } 5183 5184 qla_edif_flush_sa_ctl_lists(fcport); 5185 list_del(&fcport->list); 5186 qla2x00_clear_loop_id(fcport); 5187 5188 qla_edif_list_del(fcport); 5189 5190 kfree(fcport); 5191 } 5192 5193 static void qla_get_login_template(scsi_qla_host_t *vha) 5194 { 5195 struct qla_hw_data *ha = vha->hw; 5196 int rval; 5197 u32 *bp, sz; 5198 __be32 *q; 5199 5200 memset(ha->init_cb, 0, ha->init_cb_size); 5201 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); 5202 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, 5203 ha->init_cb, sz); 5204 if (rval != QLA_SUCCESS) { 5205 ql_dbg(ql_dbg_init, vha, 0x00d1, 5206 "PLOGI ELS param read fail.\n"); 5207 return; 5208 } 5209 q = (__be32 *)&ha->plogi_els_payld.fl_csp; 5210 5211 bp = (uint32_t *)ha->init_cb; 5212 cpu_to_be32_array(q, bp, sz / 4); 5213 ha->flags.plogi_template_valid = 1; 5214 } 5215 5216 /* 5217 * qla2x00_configure_loop 5218 * Updates Fibre Channel Device Database with what is actually on loop. 5219 * 5220 * Input: 5221 * ha = adapter block pointer. 5222 * 5223 * Returns: 5224 * 0 = success. 5225 * 1 = error. 5226 * 2 = database was full and device was not configured. 
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Work on a snapshot of dpc_flags; save_flags lets us re-arm the
	 * bits if a resync fires while we are busy (see bottom). */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Fabric topologies: a local-loop request becomes a fabric
		 * (RSCN) scan. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown situation: do both scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			if (vha->hw->flags.edif_enabled &&
			    vha->e_dbell.db_flags != EDB_ACTIVE) {
				/* wake up authentication app to get ready */
				qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
			}

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/* Drive login for the N2N peer; if no peer fcport is flagged yet, bump the
 * scan-retry counter and request a loop resync (up to MAX_SCAN_RETRIES). */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	fc_port_t *fcport;

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);
			return QLA_SUCCESS;
		}
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}
	return QLA_FUNCTION_FAILED;
}

/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;
	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login.
	 */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	if (entries == 0) {
		/* Empty list: count a scan retry and, within the limit,
		 * request another local-loop pass via resync. */
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark everything stale; ports re-found below flip back to FOUND. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		/* 2100/2200 use an 8-bit loop id field. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		/* Entry stride is chip-dependent. */
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known port: refresh its addressing in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport.
			 * sess_lock is dropped around the GFP_KERNEL
			 * allocation (it may sleep) and re-taken after. */
			fcport = new_fcport;

			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Post-scan: retire ports that vanished, log in ports we found. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	/* Release the spare (never-inserted) template fcport. */
	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}

/* Adjust firmware iIDMA port speed to the remote port's fabric-reported
 * speed, when supported and the port is online. */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if
(atomic_read(&fcport->state) != FCS_ONLINE) 5597 return; 5598 5599 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 5600 fcport->fp_speed > ha->link_data_rate || 5601 !ha->flags.gpsc_supported) 5602 return; 5603 5604 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 5605 mb); 5606 if (rval != QLA_SUCCESS) { 5607 ql_dbg(ql_dbg_disc, vha, 0x2004, 5608 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", 5609 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); 5610 } else { 5611 ql_dbg(ql_dbg_disc, vha, 0x2005, 5612 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n", 5613 qla2x00_get_link_speed_str(ha, fcport->fp_speed), 5614 fcport->fp_speed, fcport->port_name); 5615 } 5616 } 5617 5618 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) 5619 { 5620 qla2x00_iidma_fcport(vha, fcport); 5621 qla24xx_update_fcport_fcp_prio(vha, fcport); 5622 } 5623 5624 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) 5625 { 5626 struct qla_work_evt *e; 5627 5628 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA); 5629 if (!e) 5630 return QLA_FUNCTION_FAILED; 5631 5632 e->u.fcport.fcport = fcport; 5633 return qla2x00_post_work(vha, e); 5634 } 5635 5636 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ 5637 static void 5638 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) 5639 { 5640 struct fc_rport_identifiers rport_ids; 5641 struct fc_rport *rport; 5642 unsigned long flags; 5643 5644 if (atomic_read(&fcport->state) == FCS_ONLINE) 5645 return; 5646 5647 rport_ids.node_name = wwn_to_u64(fcport->node_name); 5648 rport_ids.port_name = wwn_to_u64(fcport->port_name); 5649 rport_ids.port_id = fcport->d_id.b.domain << 16 | 5650 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 5651 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 5652 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 5653 if (!rport) { 5654 ql_log(ql_log_warn, vha, 0x2006, 5655 "Unable to allocate fc remote port.\n"); 5656 return; 5657 } 
5658 5659 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 5660 *((fc_port_t **)rport->dd_data) = fcport; 5661 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 5662 fcport->dev_loss_tmo = rport->dev_loss_tmo; 5663 5664 rport->supported_classes = fcport->supported_classes; 5665 5666 rport_ids.roles = FC_PORT_ROLE_UNKNOWN; 5667 if (fcport->port_type == FCT_INITIATOR) 5668 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 5669 if (fcport->port_type == FCT_TARGET) 5670 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; 5671 if (fcport->port_type & FCT_NVME_INITIATOR) 5672 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; 5673 if (fcport->port_type & FCT_NVME_TARGET) 5674 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; 5675 if (fcport->port_type & FCT_NVME_DISCOVERY) 5676 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; 5677 5678 fc_remote_port_rolechg(rport, rport_ids.roles); 5679 5680 ql_dbg(ql_dbg_disc, vha, 0x20ee, 5681 "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n", 5682 __func__, fcport->port_name, vha->host_no, 5683 rport->scsi_target_id, rport, 5684 (fcport->port_type == FCT_TARGET) ? "tgt" : 5685 ((fcport->port_type & FCT_NVME) ? "nvme" : "ini")); 5686 } 5687 5688 /* 5689 * qla2x00_update_fcport 5690 * Updates device on list. 5691 * 5692 * Input: 5693 * ha = adapter block pointer. 5694 * fcport = port structure pointer. 5695 * 5696 * Return: 5697 * 0 - Success 5698 * BIT_0 - error 5699 * 5700 * Context: 5701 * Kernel context. 
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Reserved switch addresses are never registered as devices. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	/* On a loop-only topology there is no explicit logout on delete. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/* Count a short link-down episode and reset the down-time marker. */
	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	/* NVMe targets register with the NVMe transport and skip the
	 * SCSI FC rport path below. */
	if (NVME_TARGET(vha->hw, fcport)) {
		qla_nvme_register_remote(vha, fcport);
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the initiator and/or target side depending on the
	 * host's active mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		/* Re-query the port name after an ID change, otherwise just
		 * refresh the port speed via GPSC. */
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}

/* Work item: register an fcport and replay any RSCN state change that
 * arrived while registration was in flight. */
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	u32 rscn_gen = fcport->rscn_gen;
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}

/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		/* No switch: report success so discovery can proceed in a
		 * non-fabric topology. */
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* do/while(0) provides a single break-out point for the SNS
	 * registration sequence below. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen.
		 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
	 */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/* Run the name-server queries; on any failure fall back to
		 * the GA_NXT walk (swl = NULL), bailing out early if a loop
		 * resync was requested meanwhile. */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			/* Consume the next entry of the GID_PT result. */
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			    (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found && NVME_TARGET(vha->hw, fcport)) {
			/* A deletion was pending on a re-found NVMe target:
			 * roll its discovery state back to GNL. */
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Release the spare fcport left over from the scan loop. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}

/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
	int loop_id = FC_NO_LOOP_ID;
	int lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* Physical port always gets the fixed management-server handle. */
	if (vha->vp_idx == 0) {
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;
	}

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);
			loop_id = lid;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return loop_id;
}

/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry loop: each iteration issues one login attempt and then
	 * dispatches on the mailbox completion status. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bit 0 distinguishes initiator vs target;
			 * bit 1 flags an FCP2 (e.g. tape) device. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] carries the supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Repeat while the loop is up, no ISP abort is
				 * pending, attempts remain, and another resync
				 * was requested during configuration. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */

int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* LOOP_RESYNC_ACTIVE guards against concurrent resyncs. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/* Walk all vports and delete any rport whose removal was deferred. */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references.
 */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Hold a vref so the vport cannot go away while the lock is
		 * dropped for the rport deletion below. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* Find another FCoE function (4 bits of class info per function,
	 * functions 0-7 in part_info1). */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* Not found in 0-7; scan functions 8-15 in part_info2. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
6636 * However consider only valid physical fcoe function numbers (0-15). 6637 */ 6638 drv_presence_mask = ~((1 << (ha->portnum)) | 6639 ((fcoe_other_function == 0xffff) ? 6640 0 : (1 << (fcoe_other_function)))); 6641 6642 /* We are the reset owner iff: 6643 * - No other protocol drivers present. 6644 * - This is the lowest among fcoe functions. */ 6645 if (!(drv_presence & drv_presence_mask) && 6646 (ha->portnum < fcoe_other_function)) { 6647 ql_dbg(ql_dbg_p3p, vha, 0xb07f, 6648 "This host is Reset owner.\n"); 6649 ha->flags.nic_core_reset_owner = 1; 6650 } 6651 } 6652 6653 static int 6654 __qla83xx_set_drv_ack(scsi_qla_host_t *vha) 6655 { 6656 int rval = QLA_SUCCESS; 6657 struct qla_hw_data *ha = vha->hw; 6658 uint32_t drv_ack; 6659 6660 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6661 if (rval == QLA_SUCCESS) { 6662 drv_ack |= (1 << ha->portnum); 6663 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); 6664 } 6665 6666 return rval; 6667 } 6668 6669 static int 6670 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha) 6671 { 6672 int rval = QLA_SUCCESS; 6673 struct qla_hw_data *ha = vha->hw; 6674 uint32_t drv_ack; 6675 6676 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6677 if (rval == QLA_SUCCESS) { 6678 drv_ack &= ~(1 << ha->portnum); 6679 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); 6680 } 6681 6682 return rval; 6683 } 6684 6685 static const char * 6686 qla83xx_dev_state_to_string(uint32_t dev_state) 6687 { 6688 switch (dev_state) { 6689 case QLA8XXX_DEV_COLD: 6690 return "COLD/RE-INIT"; 6691 case QLA8XXX_DEV_INITIALIZING: 6692 return "INITIALIZING"; 6693 case QLA8XXX_DEV_READY: 6694 return "READY"; 6695 case QLA8XXX_DEV_NEED_RESET: 6696 return "NEED RESET"; 6697 case QLA8XXX_DEV_NEED_QUIESCENT: 6698 return "NEED QUIESCENT"; 6699 case QLA8XXX_DEV_FAILED: 6700 return "FAILED"; 6701 case QLA8XXX_DEV_QUIESCENT: 6702 return "QUIESCENT"; 6703 default: 6704 return "Unknown"; 6705 } 6706 } 6707 6708 /* Assumes 
idc-lock always held on entry */ 6709 void 6710 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) 6711 { 6712 struct qla_hw_data *ha = vha->hw; 6713 uint32_t idc_audit_reg = 0, duration_secs = 0; 6714 6715 switch (audit_type) { 6716 case IDC_AUDIT_TIMESTAMP: 6717 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); 6718 idc_audit_reg = (ha->portnum) | 6719 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); 6720 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 6721 break; 6722 6723 case IDC_AUDIT_COMPLETION: 6724 duration_secs = ((jiffies_to_msecs(jiffies) - 6725 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); 6726 idc_audit_reg = (ha->portnum) | 6727 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); 6728 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 6729 break; 6730 6731 default: 6732 ql_log(ql_log_warn, vha, 0xb078, 6733 "Invalid audit type specified.\n"); 6734 break; 6735 } 6736 } 6737 6738 /* Assumes idc_lock always held on entry */ 6739 static int 6740 qla83xx_initiating_reset(scsi_qla_host_t *vha) 6741 { 6742 struct qla_hw_data *ha = vha->hw; 6743 uint32_t idc_control, dev_state; 6744 6745 __qla83xx_get_idc_control(vha, &idc_control); 6746 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { 6747 ql_log(ql_log_info, vha, 0xb080, 6748 "NIC Core reset has been disabled. 
idc-control=0x%x\n", 6749 idc_control); 6750 return QLA_FUNCTION_FAILED; 6751 } 6752 6753 /* Set NEED-RESET iff in READY state and we are the reset-owner */ 6754 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6755 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { 6756 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 6757 QLA8XXX_DEV_NEED_RESET); 6758 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); 6759 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); 6760 } else { 6761 const char *state = qla83xx_dev_state_to_string(dev_state); 6762 6763 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state); 6764 6765 /* SV: XXX: Is timeout required here? */ 6766 /* Wait for IDC state change READY -> NEED_RESET */ 6767 while (dev_state == QLA8XXX_DEV_READY) { 6768 qla83xx_idc_unlock(vha, 0); 6769 msleep(200); 6770 qla83xx_idc_lock(vha, 0); 6771 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6772 } 6773 } 6774 6775 /* Send IDC ack by writing to drv-ack register */ 6776 __qla83xx_set_drv_ack(vha); 6777 6778 return QLA_SUCCESS; 6779 } 6780 6781 int 6782 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) 6783 { 6784 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6785 } 6786 6787 int 6788 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) 6789 { 6790 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6791 } 6792 6793 static int 6794 qla83xx_check_driver_presence(scsi_qla_host_t *vha) 6795 { 6796 uint32_t drv_presence = 0; 6797 struct qla_hw_data *ha = vha->hw; 6798 6799 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6800 if (drv_presence & (1 << ha->portnum)) 6801 return QLA_SUCCESS; 6802 else 6803 return QLA_TEST_FAILED; 6804 } 6805 6806 int 6807 qla83xx_nic_core_reset(scsi_qla_host_t *vha) 6808 { 6809 int rval = QLA_SUCCESS; 6810 struct qla_hw_data *ha = vha->hw; 6811 6812 ql_dbg(ql_dbg_p3p, vha, 0xb058, 6813 "Entered %s().\n", __func__); 6814 6815 if 
(vha->device_flags & DFLG_DEV_FAILED) { 6816 ql_log(ql_log_warn, vha, 0xb059, 6817 "Device in unrecoverable FAILED state.\n"); 6818 return QLA_FUNCTION_FAILED; 6819 } 6820 6821 qla83xx_idc_lock(vha, 0); 6822 6823 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { 6824 ql_log(ql_log_warn, vha, 0xb05a, 6825 "Function=0x%x has been removed from IDC participation.\n", 6826 ha->portnum); 6827 rval = QLA_FUNCTION_FAILED; 6828 goto exit; 6829 } 6830 6831 qla83xx_reset_ownership(vha); 6832 6833 rval = qla83xx_initiating_reset(vha); 6834 6835 /* 6836 * Perform reset if we are the reset-owner, 6837 * else wait till IDC state changes to READY/FAILED. 6838 */ 6839 if (rval == QLA_SUCCESS) { 6840 rval = qla83xx_idc_state_handler(vha); 6841 6842 if (rval == QLA_SUCCESS) 6843 ha->flags.nic_core_hung = 0; 6844 __qla83xx_clear_drv_ack(vha); 6845 } 6846 6847 exit: 6848 qla83xx_idc_unlock(vha, 0); 6849 6850 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); 6851 6852 return rval; 6853 } 6854 6855 int 6856 qla2xxx_mctp_dump(scsi_qla_host_t *vha) 6857 { 6858 struct qla_hw_data *ha = vha->hw; 6859 int rval = QLA_FUNCTION_FAILED; 6860 6861 if (!IS_MCTP_CAPABLE(ha)) { 6862 /* This message can be removed from the final version */ 6863 ql_log(ql_log_info, vha, 0x506d, 6864 "This board is not MCTP capable\n"); 6865 return rval; 6866 } 6867 6868 if (!ha->mctp_dump) { 6869 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, 6870 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); 6871 6872 if (!ha->mctp_dump) { 6873 ql_log(ql_log_warn, vha, 0x506e, 6874 "Failed to allocate memory for mctp dump\n"); 6875 return rval; 6876 } 6877 } 6878 6879 #define MCTP_DUMP_STR_ADDR 0x00000000 6880 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, 6881 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); 6882 if (rval != QLA_SUCCESS) { 6883 ql_log(ql_log_warn, vha, 0x506f, 6884 "Failed to capture mctp dump\n"); 6885 } else { 6886 ql_log(ql_log_info, vha, 0x5070, 6887 "Mctp dump capture for host (%ld/%p).\n", 
6888 vha->host_no, ha->mctp_dump); 6889 ha->mctp_dumped = 1; 6890 } 6891 6892 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { 6893 ha->flags.nic_core_reset_hdlr_active = 1; 6894 rval = qla83xx_restart_nic_firmware(vha); 6895 if (rval) 6896 /* NIC Core reset failed. */ 6897 ql_log(ql_log_warn, vha, 0x5071, 6898 "Failed to restart nic firmware\n"); 6899 else 6900 ql_dbg(ql_dbg_p3p, vha, 0xb084, 6901 "Restarted NIC firmware successfully.\n"); 6902 ha->flags.nic_core_reset_hdlr_active = 0; 6903 } 6904 6905 return rval; 6906 6907 } 6908 6909 /* 6910 * qla2x00_quiesce_io 6911 * Description: This function will block the new I/Os 6912 * Its not aborting any I/Os as context 6913 * is not destroyed during quiescence 6914 * Arguments: scsi_qla_host_t 6915 * return : void 6916 */ 6917 void 6918 qla2x00_quiesce_io(scsi_qla_host_t *vha) 6919 { 6920 struct qla_hw_data *ha = vha->hw; 6921 struct scsi_qla_host *vp; 6922 6923 ql_dbg(ql_dbg_dpc, vha, 0x401d, 6924 "Quiescing I/O - ha=%p.\n", ha); 6925 6926 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 6927 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 6928 atomic_set(&vha->loop_state, LOOP_DOWN); 6929 qla2x00_mark_all_devices_lost(vha); 6930 list_for_each_entry(vp, &ha->vp_list, list) 6931 qla2x00_mark_all_devices_lost(vp); 6932 } else { 6933 if (!atomic_read(&vha->loop_down_timer)) 6934 atomic_set(&vha->loop_down_timer, 6935 LOOP_DOWN_TIME); 6936 } 6937 /* Wait for pending cmds to complete */ 6938 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) 6939 != QLA_SUCCESS); 6940 } 6941 6942 void 6943 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) 6944 { 6945 struct qla_hw_data *ha = vha->hw; 6946 struct scsi_qla_host *vp; 6947 unsigned long flags; 6948 fc_port_t *fcport; 6949 u16 i; 6950 6951 /* For ISP82XX, driver waits for completion of the commands. 6952 * online flag should be set. 
6953 */ 6954 if (!(IS_P3P_TYPE(ha))) 6955 vha->flags.online = 0; 6956 ha->flags.chip_reset_done = 0; 6957 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6958 vha->qla_stats.total_isp_aborts++; 6959 6960 ql_log(ql_log_info, vha, 0x00af, 6961 "Performing ISP error recovery - ha=%p.\n", ha); 6962 6963 ha->flags.purge_mbox = 1; 6964 /* For ISP82XX, reset_chip is just disabling interrupts. 6965 * Driver waits for the completion of the commands. 6966 * the interrupts need to be enabled. 6967 */ 6968 if (!(IS_P3P_TYPE(ha))) 6969 ha->isp_ops->reset_chip(vha); 6970 6971 ha->link_data_rate = PORT_SPEED_UNKNOWN; 6972 SAVE_TOPO(ha); 6973 ha->flags.rida_fmt2 = 0; 6974 ha->flags.n2n_ae = 0; 6975 ha->flags.lip_ae = 0; 6976 ha->current_topology = 0; 6977 QLA_FW_STOPPED(ha); 6978 ha->flags.fw_init_done = 0; 6979 ha->chip_reset++; 6980 ha->base_qpair->chip_reset = ha->chip_reset; 6981 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; 6982 for (i = 0; i < ha->max_qpairs; i++) { 6983 if (ha->queue_pair_map[i]) { 6984 ha->queue_pair_map[i]->chip_reset = 6985 ha->base_qpair->chip_reset; 6986 ha->queue_pair_map[i]->cmd_cnt = 6987 ha->queue_pair_map[i]->cmd_completion_cnt = 0; 6988 } 6989 } 6990 6991 /* purge MBox commands */ 6992 if (atomic_read(&ha->num_pend_mbx_stage3)) { 6993 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 6994 complete(&ha->mbx_intr_comp); 6995 } 6996 6997 i = 0; 6998 while (atomic_read(&ha->num_pend_mbx_stage3) || 6999 atomic_read(&ha->num_pend_mbx_stage2) || 7000 atomic_read(&ha->num_pend_mbx_stage1)) { 7001 msleep(20); 7002 i++; 7003 if (i > 50) 7004 break; 7005 } 7006 ha->flags.purge_mbox = 0; 7007 7008 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 7009 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 7010 atomic_set(&vha->loop_state, LOOP_DOWN); 7011 qla2x00_mark_all_devices_lost(vha); 7012 7013 spin_lock_irqsave(&ha->vport_slock, flags); 7014 list_for_each_entry(vp, &ha->vp_list, list) { 7015 atomic_inc(&vp->vref_count); 7016 
spin_unlock_irqrestore(&ha->vport_slock, flags); 7017 7018 qla2x00_mark_all_devices_lost(vp); 7019 7020 spin_lock_irqsave(&ha->vport_slock, flags); 7021 atomic_dec(&vp->vref_count); 7022 } 7023 spin_unlock_irqrestore(&ha->vport_slock, flags); 7024 } else { 7025 if (!atomic_read(&vha->loop_down_timer)) 7026 atomic_set(&vha->loop_down_timer, 7027 LOOP_DOWN_TIME); 7028 } 7029 7030 /* Clear all async request states across all VPs. */ 7031 list_for_each_entry(fcport, &vha->vp_fcports, list) { 7032 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7033 fcport->scan_state = 0; 7034 } 7035 spin_lock_irqsave(&ha->vport_slock, flags); 7036 list_for_each_entry(vp, &ha->vp_list, list) { 7037 atomic_inc(&vp->vref_count); 7038 spin_unlock_irqrestore(&ha->vport_slock, flags); 7039 7040 list_for_each_entry(fcport, &vp->vp_fcports, list) 7041 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7042 7043 spin_lock_irqsave(&ha->vport_slock, flags); 7044 atomic_dec(&vp->vref_count); 7045 } 7046 spin_unlock_irqrestore(&ha->vport_slock, flags); 7047 7048 /* Make sure for ISP 82XX IO DMA is complete */ 7049 if (IS_P3P_TYPE(ha)) { 7050 qla82xx_chip_reset_cleanup(vha); 7051 ql_log(ql_log_info, vha, 0x00b4, 7052 "Done chip reset cleanup.\n"); 7053 7054 /* Done waiting for pending commands. Reset online flag */ 7055 vha->flags.online = 0; 7056 } 7057 7058 /* Requeue all commands in outstanding command list. */ 7059 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 7060 /* memory barrier */ 7061 wmb(); 7062 } 7063 7064 /* 7065 * qla2x00_abort_isp 7066 * Resets ISP and aborts all outstanding commands. 7067 * 7068 * Input: 7069 * ha = adapter block pointer. 
7070 * 7071 * Returns: 7072 * 0 = success 7073 */ 7074 int 7075 qla2x00_abort_isp(scsi_qla_host_t *vha) 7076 { 7077 int rval; 7078 uint8_t status = 0; 7079 struct qla_hw_data *ha = vha->hw; 7080 struct scsi_qla_host *vp; 7081 struct req_que *req = ha->req_q_map[0]; 7082 unsigned long flags; 7083 7084 if (vha->flags.online) { 7085 qla2x00_abort_isp_cleanup(vha); 7086 7087 if (vha->hw->flags.port_isolated) 7088 return status; 7089 7090 if (qla2x00_isp_reg_stat(ha)) { 7091 ql_log(ql_log_info, vha, 0x803f, 7092 "ISP Abort - ISP reg disconnect, exiting.\n"); 7093 return status; 7094 } 7095 7096 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { 7097 ha->flags.chip_reset_done = 1; 7098 vha->flags.online = 1; 7099 status = 0; 7100 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7101 return status; 7102 } 7103 7104 if (IS_QLA8031(ha)) { 7105 ql_dbg(ql_dbg_p3p, vha, 0xb05c, 7106 "Clearing fcoe driver presence.\n"); 7107 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) 7108 ql_dbg(ql_dbg_p3p, vha, 0xb073, 7109 "Error while clearing DRV-Presence.\n"); 7110 } 7111 7112 if (unlikely(pci_channel_offline(ha->pdev) && 7113 ha->flags.pci_channel_io_perm_failure)) { 7114 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7115 status = 0; 7116 return status; 7117 } 7118 7119 switch (vha->qlini_mode) { 7120 case QLA2XXX_INI_MODE_DISABLED: 7121 if (!qla_tgt_mode_enabled(vha)) 7122 return 0; 7123 break; 7124 case QLA2XXX_INI_MODE_DUAL: 7125 if (!qla_dual_mode_enabled(vha)) 7126 return 0; 7127 break; 7128 case QLA2XXX_INI_MODE_ENABLED: 7129 default: 7130 break; 7131 } 7132 7133 ha->isp_ops->get_flash_version(vha, req->ring); 7134 7135 if (qla2x00_isp_reg_stat(ha)) { 7136 ql_log(ql_log_info, vha, 0x803f, 7137 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); 7138 return status; 7139 } 7140 ha->isp_ops->nvram_config(vha); 7141 7142 if (qla2x00_isp_reg_stat(ha)) { 7143 ql_log(ql_log_info, vha, 0x803f, 7144 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); 
7145 return status; 7146 } 7147 if (!qla2x00_restart_isp(vha)) { 7148 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7149 7150 if (!atomic_read(&vha->loop_down_timer)) { 7151 /* 7152 * Issue marker command only when we are going 7153 * to start the I/O . 7154 */ 7155 vha->marker_needed = 1; 7156 } 7157 7158 vha->flags.online = 1; 7159 7160 ha->isp_ops->enable_intrs(ha); 7161 7162 ha->isp_abort_cnt = 0; 7163 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7164 7165 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) 7166 qla2x00_get_fw_version(vha); 7167 if (ha->fce) { 7168 ha->flags.fce_enabled = 1; 7169 memset(ha->fce, 0, 7170 fce_calc_size(ha->fce_bufs)); 7171 rval = qla2x00_enable_fce_trace(vha, 7172 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 7173 &ha->fce_bufs); 7174 if (rval) { 7175 ql_log(ql_log_warn, vha, 0x8033, 7176 "Unable to reinitialize FCE " 7177 "(%d).\n", rval); 7178 ha->flags.fce_enabled = 0; 7179 } 7180 } 7181 7182 if (ha->eft) { 7183 memset(ha->eft, 0, EFT_SIZE); 7184 rval = qla2x00_enable_eft_trace(vha, 7185 ha->eft_dma, EFT_NUM_BUFFERS); 7186 if (rval) { 7187 ql_log(ql_log_warn, vha, 0x8034, 7188 "Unable to reinitialize EFT " 7189 "(%d).\n", rval); 7190 } 7191 } 7192 } else { /* failed the ISP abort */ 7193 vha->flags.online = 1; 7194 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 7195 if (ha->isp_abort_cnt == 0) { 7196 ql_log(ql_log_fatal, vha, 0x8035, 7197 "ISP error recover failed - " 7198 "board disabled.\n"); 7199 /* 7200 * The next call disables the board 7201 * completely. 
7202 */ 7203 qla2x00_abort_isp_cleanup(vha); 7204 vha->flags.online = 0; 7205 clear_bit(ISP_ABORT_RETRY, 7206 &vha->dpc_flags); 7207 status = 0; 7208 } else { /* schedule another ISP abort */ 7209 ha->isp_abort_cnt--; 7210 ql_dbg(ql_dbg_taskm, vha, 0x8020, 7211 "ISP abort - retry remaining %d.\n", 7212 ha->isp_abort_cnt); 7213 status = 1; 7214 } 7215 } else { 7216 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 7217 ql_dbg(ql_dbg_taskm, vha, 0x8021, 7218 "ISP error recovery - retrying (%d) " 7219 "more times.\n", ha->isp_abort_cnt); 7220 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 7221 status = 1; 7222 } 7223 } 7224 7225 } 7226 7227 if (vha->hw->flags.port_isolated) { 7228 qla2x00_abort_isp_cleanup(vha); 7229 return status; 7230 } 7231 7232 if (!status) { 7233 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); 7234 qla2x00_configure_hba(vha); 7235 spin_lock_irqsave(&ha->vport_slock, flags); 7236 list_for_each_entry(vp, &ha->vp_list, list) { 7237 if (vp->vp_idx) { 7238 atomic_inc(&vp->vref_count); 7239 spin_unlock_irqrestore(&ha->vport_slock, flags); 7240 7241 qla2x00_vp_abort_isp(vp); 7242 7243 spin_lock_irqsave(&ha->vport_slock, flags); 7244 atomic_dec(&vp->vref_count); 7245 } 7246 } 7247 spin_unlock_irqrestore(&ha->vport_slock, flags); 7248 7249 if (IS_QLA8031(ha)) { 7250 ql_dbg(ql_dbg_p3p, vha, 0xb05d, 7251 "Setting back fcoe driver presence.\n"); 7252 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS) 7253 ql_dbg(ql_dbg_p3p, vha, 0xb074, 7254 "Error while setting DRV-Presence.\n"); 7255 } 7256 } else { 7257 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", 7258 __func__); 7259 } 7260 7261 return(status); 7262 } 7263 7264 /* 7265 * qla2x00_restart_isp 7266 * restarts the ISP after a reset 7267 * 7268 * Input: 7269 * ha = adapter block pointer. 
7270 * 7271 * Returns: 7272 * 0 = success 7273 */ 7274 static int 7275 qla2x00_restart_isp(scsi_qla_host_t *vha) 7276 { 7277 int status; 7278 struct qla_hw_data *ha = vha->hw; 7279 7280 /* If firmware needs to be loaded */ 7281 if (qla2x00_isp_firmware(vha)) { 7282 vha->flags.online = 0; 7283 status = ha->isp_ops->chip_diag(vha); 7284 if (status) 7285 return status; 7286 status = qla2x00_setup_chip(vha); 7287 if (status) 7288 return status; 7289 } 7290 7291 status = qla2x00_init_rings(vha); 7292 if (status) 7293 return status; 7294 7295 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7296 ha->flags.chip_reset_done = 1; 7297 7298 /* Initialize the queues in use */ 7299 qla25xx_init_queues(ha); 7300 7301 status = qla2x00_fw_ready(vha); 7302 if (status) { 7303 /* if no cable then assume it's good */ 7304 return vha->device_flags & DFLG_NO_CABLE ? 0 : status; 7305 } 7306 7307 /* Issue a marker after FW becomes ready. */ 7308 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); 7309 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 7310 7311 return 0; 7312 } 7313 7314 static int 7315 qla25xx_init_queues(struct qla_hw_data *ha) 7316 { 7317 struct rsp_que *rsp = NULL; 7318 struct req_que *req = NULL; 7319 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 7320 int ret = -1; 7321 int i; 7322 7323 for (i = 1; i < ha->max_rsp_queues; i++) { 7324 rsp = ha->rsp_q_map[i]; 7325 if (rsp && test_bit(i, ha->rsp_qid_map)) { 7326 rsp->options &= ~BIT_0; 7327 ret = qla25xx_init_rsp_que(base_vha, rsp); 7328 if (ret != QLA_SUCCESS) 7329 ql_dbg(ql_dbg_init, base_vha, 0x00ff, 7330 "%s Rsp que: %d init failed.\n", 7331 __func__, rsp->id); 7332 else 7333 ql_dbg(ql_dbg_init, base_vha, 0x0100, 7334 "%s Rsp que: %d inited.\n", 7335 __func__, rsp->id); 7336 } 7337 } 7338 for (i = 1; i < ha->max_req_queues; i++) { 7339 req = ha->req_q_map[i]; 7340 if (req && test_bit(i, ha->req_qid_map)) { 7341 /* Clear outstanding commands array. 
*/ 7342 req->options &= ~BIT_0; 7343 ret = qla25xx_init_req_que(base_vha, req); 7344 if (ret != QLA_SUCCESS) 7345 ql_dbg(ql_dbg_init, base_vha, 0x0101, 7346 "%s Req que: %d init failed.\n", 7347 __func__, req->id); 7348 else 7349 ql_dbg(ql_dbg_init, base_vha, 0x0102, 7350 "%s Req que: %d inited.\n", 7351 __func__, req->id); 7352 } 7353 } 7354 return ret; 7355 } 7356 7357 /* 7358 * qla2x00_reset_adapter 7359 * Reset adapter. 7360 * 7361 * Input: 7362 * ha = adapter block pointer. 7363 */ 7364 int 7365 qla2x00_reset_adapter(scsi_qla_host_t *vha) 7366 { 7367 unsigned long flags = 0; 7368 struct qla_hw_data *ha = vha->hw; 7369 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 7370 7371 vha->flags.online = 0; 7372 ha->isp_ops->disable_intrs(ha); 7373 7374 spin_lock_irqsave(&ha->hardware_lock, flags); 7375 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 7376 rd_reg_word(®->hccr); /* PCI Posting. */ 7377 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); 7378 rd_reg_word(®->hccr); /* PCI Posting. */ 7379 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7380 7381 return QLA_SUCCESS; 7382 } 7383 7384 int 7385 qla24xx_reset_adapter(scsi_qla_host_t *vha) 7386 { 7387 unsigned long flags = 0; 7388 struct qla_hw_data *ha = vha->hw; 7389 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 7390 7391 if (IS_P3P_TYPE(ha)) 7392 return QLA_SUCCESS; 7393 7394 vha->flags.online = 0; 7395 ha->isp_ops->disable_intrs(ha); 7396 7397 spin_lock_irqsave(&ha->hardware_lock, flags); 7398 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); 7399 rd_reg_dword(®->hccr); 7400 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); 7401 rd_reg_dword(®->hccr); 7402 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7403 7404 if (IS_NOPOLLING_TYPE(ha)) 7405 ha->isp_ops->enable_intrs(ha); 7406 7407 return QLA_SUCCESS; 7408 } 7409 7410 /* On sparc systems, obtain port and node WWN from firmware 7411 * properties. 
 */
/* Override default WWNs with OpenFirmware-provided values (sparc only). */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * qla24xx_nvram_config - read and validate the ISP24xx NVRAM, fall back
 * to safe defaults on checksum/id/version mismatch (rval = 1), then
 * build the initialization control block and derive driver parameters
 * (timeouts, retry counts, ZIO mode) from it.
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (__force __le32 *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	/* A valid image sums (over 32-bit words) to zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		/* Default WWPN/WWNN; port byte varies with port number. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/* Log the decoded fields of a flash image-status record. */
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}

/* Nonzero (true) when the aux image-status signature is NOT valid. */
static bool
qla28xx_check_aux_image_status_signature(
    struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
}

/* Nonzero (true) when the image-status signature is NOT valid. */
static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return
	    signature != QLA27XX_IMG_STATUS_SIGN &&
	    signature != QLA28XX_IMG_STATUS_SIGN;
}

/* Sum of all 32-bit words in the record; zero means checksum OK. */
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
	__le32 *p = (__force __le32 *)image_status;
	uint n = sizeof(*image_status) / sizeof(*p);
	uint32_t sum = 0;

	for ( ; n--; p++)
		sum +=
		    le32_to_cpup(p);

	return sum;
}

/* Select primary/secondary image for a component from the aux bitmap. */
static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
{
	return aux->bitmap & bitmask ?
	    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
}

/* Fill in per-component active-image selections from an aux record. */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}

/*
 * Compare primary vs. secondary image generation numbers; >= 0 means
 * the primary image is at least as new.
 */
static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* calculate generation delta as uint16 (this accounts for wrap) */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}

/*
 * qla28xx_get_aux_images - read and validate the primary and secondary
 * auxiliary image-status records from flash, then record which image
 * each auxiliary component should come from in *active_regions.
 * Newer generation wins when both images are valid and active.
 */
void
qla28xx_get_aux_images(
    struct scsi_qla_host *vha, struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_aux_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
	    ha->flt_region_aux_img_status_pri,
	    sizeof(pri_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary aux image signature (%#x) not valid\n",
		    le32_to_cpu(pri_aux_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary aux image checksum failed\n");
		goto check_sec_image;
	}

	valid_pri_image = true;

	if (pri_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Primary aux image is active\n");
		active_pri_image = true;
	}

check_sec_image:
	if (!ha->flt_region_aux_img_status_sec) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Secondary aux image not addressed\n");
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
	    ha->flt_region_aux_img_status_sec,
	    sizeof(sec_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Secondary aux image signature (%#x) not valid\n",
		    le32_to_cpu(sec_aux_image_status.signature));
		goto check_valid_image;
	}

	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Secondary aux image checksum failed\n");
		goto check_valid_image;
	}

	valid_sec_image = true;

	if (sec_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary aux image is active\n");
		active_sec_image = true;
	}

check_valid_image:
	if (valid_pri_image && active_pri_image &&
	    valid_sec_image && active_sec_image) {
		/* Both usable: prefer the newer generation. */
		if (qla27xx_compare_image_generation(&pri_aux_image_status,
		    &sec_aux_image_status) >= 0) {
			qla28xx_component_status(active_regions,
			    &pri_aux_image_status);
		} else {
			qla28xx_component_status(active_regions,
			    &sec_aux_image_status);
		}
	} else if (valid_pri_image && active_pri_image) {
		qla28xx_component_status(active_regions, &pri_aux_image_status);
	} else if (valid_sec_image && active_sec_image) {
		qla28xx_component_status(active_regions, &sec_aux_image_status);
	}

	ql_dbg(ql_dbg_init, vha, 0x018f,
	    "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
	    active_regions->aux.board_config,
	    active_regions->aux.vpd_nvram,
	    active_regions->aux.npiv_config_0_1,
	    active_regions->aux.npiv_config_2_3);
}

/*
 * qla27xx_get_active_image - determine the active firmware image
 * (primary vs. secondary) from the flash image-status records.
 * (Definition continues beyond this chunk.)
 */
void
qla27xx_get_active_image(struct scsi_qla_host *vha,
    struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_image_status, sec_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
		goto check_sec_image;
	}

	if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
	    QLA_SUCCESS) {
		WARN_ON_ONCE(true);
		goto check_sec_image;
	}
	qla27xx_print_image(vha, "Primary image", &pri_image_status);

	if (qla27xx_check_image_status_signature(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (%#x) not valid\n",
		    le32_to_cpu(pri_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary image checksum failed\n");
		goto
check_sec_image; 7911 } 7912 7913 valid_pri_image = true; 7914 7915 if (pri_image_status.image_status_mask & 1) { 7916 ql_dbg(ql_dbg_init, vha, 0x018d, 7917 "Primary image is active\n"); 7918 active_pri_image = true; 7919 } 7920 7921 check_sec_image: 7922 if (!ha->flt_region_img_status_sec) { 7923 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); 7924 goto check_valid_image; 7925 } 7926 7927 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 7928 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); 7929 qla27xx_print_image(vha, "Secondary image", &sec_image_status); 7930 7931 if (qla27xx_check_image_status_signature(&sec_image_status)) { 7932 ql_dbg(ql_dbg_init, vha, 0x018b, 7933 "Secondary image signature (%#x) not valid\n", 7934 le32_to_cpu(sec_image_status.signature)); 7935 goto check_valid_image; 7936 } 7937 7938 if (qla27xx_image_status_checksum(&sec_image_status)) { 7939 ql_dbg(ql_dbg_init, vha, 0x018c, 7940 "Secondary image checksum failed\n"); 7941 goto check_valid_image; 7942 } 7943 7944 valid_sec_image = true; 7945 7946 if (sec_image_status.image_status_mask & 1) { 7947 ql_dbg(ql_dbg_init, vha, 0x018d, 7948 "Secondary image is active\n"); 7949 active_sec_image = true; 7950 } 7951 7952 check_valid_image: 7953 if (valid_pri_image && active_pri_image) 7954 active_regions->global = QLA27XX_PRIMARY_IMAGE; 7955 7956 if (valid_sec_image && active_sec_image) { 7957 if (!active_regions->global || 7958 qla27xx_compare_image_generation( 7959 &pri_image_status, &sec_image_status) < 0) { 7960 active_regions->global = QLA27XX_SECONDARY_IMAGE; 7961 } 7962 } 7963 7964 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", 7965 active_regions->global == QLA27XX_DEFAULT_IMAGE ? 7966 "default (boot/fw)" : 7967 active_regions->global == QLA27XX_PRIMARY_IMAGE ? 7968 "primary" : 7969 active_regions->global == QLA27XX_SECONDARY_IMAGE ? 
7970 "secondary" : "invalid", 7971 active_regions->global); 7972 } 7973 7974 bool qla24xx_risc_firmware_invalid(uint32_t *dword) 7975 { 7976 return 7977 !(dword[4] | dword[5] | dword[6] | dword[7]) || 7978 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); 7979 } 7980 7981 static int 7982 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 7983 uint32_t faddr) 7984 { 7985 int rval; 7986 uint templates, segments, fragment; 7987 ulong i; 7988 uint j; 7989 ulong dlen; 7990 uint32_t *dcode; 7991 uint32_t risc_addr, risc_size, risc_attr = 0; 7992 struct qla_hw_data *ha = vha->hw; 7993 struct req_que *req = ha->req_q_map[0]; 7994 struct fwdt *fwdt = ha->fwdt; 7995 7996 ql_dbg(ql_dbg_init, vha, 0x008b, 7997 "FW: Loading firmware from flash (%x).\n", faddr); 7998 7999 dcode = (uint32_t *)req->ring; 8000 qla24xx_read_flash_data(vha, dcode, faddr, 8); 8001 if (qla24xx_risc_firmware_invalid(dcode)) { 8002 ql_log(ql_log_fatal, vha, 0x008c, 8003 "Unable to verify the integrity of flash firmware " 8004 "image.\n"); 8005 ql_log(ql_log_fatal, vha, 0x008d, 8006 "Firmware data: %08x %08x %08x %08x.\n", 8007 dcode[0], dcode[1], dcode[2], dcode[3]); 8008 8009 return QLA_FUNCTION_FAILED; 8010 } 8011 8012 dcode = (uint32_t *)req->ring; 8013 *srisc_addr = 0; 8014 segments = FA_RISC_CODE_SEGMENTS; 8015 for (j = 0; j < segments; j++) { 8016 ql_dbg(ql_dbg_init, vha, 0x008d, 8017 "-> Loading segment %u...\n", j); 8018 qla24xx_read_flash_data(vha, dcode, faddr, 10); 8019 risc_addr = be32_to_cpu((__force __be32)dcode[2]); 8020 risc_size = be32_to_cpu((__force __be32)dcode[3]); 8021 if (!*srisc_addr) { 8022 *srisc_addr = risc_addr; 8023 risc_attr = be32_to_cpu((__force __be32)dcode[9]); 8024 } 8025 8026 dlen = ha->fw_transfer_size >> 2; 8027 for (fragment = 0; risc_size; fragment++) { 8028 if (dlen > risc_size) 8029 dlen = risc_size; 8030 8031 ql_dbg(ql_dbg_init, vha, 0x008e, 8032 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", 8033 fragment, risc_addr, faddr, dlen); 
8034 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 8035 for (i = 0; i < dlen; i++) 8036 dcode[i] = swab32(dcode[i]); 8037 8038 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); 8039 if (rval) { 8040 ql_log(ql_log_fatal, vha, 0x008f, 8041 "-> Failed load firmware fragment %u.\n", 8042 fragment); 8043 return QLA_FUNCTION_FAILED; 8044 } 8045 8046 faddr += dlen; 8047 risc_addr += dlen; 8048 risc_size -= dlen; 8049 } 8050 } 8051 8052 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 8053 return QLA_SUCCESS; 8054 8055 templates = (risc_attr & BIT_9) ? 2 : 1; 8056 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates); 8057 for (j = 0; j < templates; j++, fwdt++) { 8058 vfree(fwdt->template); 8059 fwdt->template = NULL; 8060 fwdt->length = 0; 8061 8062 dcode = (uint32_t *)req->ring; 8063 qla24xx_read_flash_data(vha, dcode, faddr, 7); 8064 risc_size = be32_to_cpu((__force __be32)dcode[2]); 8065 ql_dbg(ql_dbg_init, vha, 0x0161, 8066 "-> fwdt%u template array at %#x (%#x dwords)\n", 8067 j, faddr, risc_size); 8068 if (!risc_size || !~risc_size) { 8069 ql_dbg(ql_dbg_init, vha, 0x0162, 8070 "-> fwdt%u failed to read array\n", j); 8071 goto failed; 8072 } 8073 8074 /* skip header and ignore checksum */ 8075 faddr += 7; 8076 risc_size -= 8; 8077 8078 ql_dbg(ql_dbg_init, vha, 0x0163, 8079 "-> fwdt%u template allocate template %#x words...\n", 8080 j, risc_size); 8081 fwdt->template = vmalloc(risc_size * sizeof(*dcode)); 8082 if (!fwdt->template) { 8083 ql_log(ql_log_warn, vha, 0x0164, 8084 "-> fwdt%u failed allocate template.\n", j); 8085 goto failed; 8086 } 8087 8088 dcode = fwdt->template; 8089 qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 8090 8091 if (!qla27xx_fwdt_template_valid(dcode)) { 8092 ql_log(ql_log_warn, vha, 0x0165, 8093 "-> fwdt%u failed template validate\n", j); 8094 goto failed; 8095 } 8096 8097 dlen = qla27xx_fwdt_template_size(dcode); 8098 ql_dbg(ql_dbg_init, vha, 0x0166, 8099 "-> fwdt%u template size %#lx bytes (%#lx words)\n", 8100 j, 
dlen, dlen / sizeof(*dcode)); 8101 if (dlen > risc_size * sizeof(*dcode)) { 8102 ql_log(ql_log_warn, vha, 0x0167, 8103 "-> fwdt%u template exceeds array (%-lu bytes)\n", 8104 j, dlen - risc_size * sizeof(*dcode)); 8105 goto failed; 8106 } 8107 8108 fwdt->length = dlen; 8109 ql_dbg(ql_dbg_init, vha, 0x0168, 8110 "-> fwdt%u loaded template ok\n", j); 8111 8112 faddr += risc_size + 1; 8113 } 8114 8115 return QLA_SUCCESS; 8116 8117 failed: 8118 vfree(fwdt->template); 8119 fwdt->template = NULL; 8120 fwdt->length = 0; 8121 8122 return QLA_SUCCESS; 8123 } 8124 8125 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" 8126 8127 int 8128 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 8129 { 8130 int rval; 8131 int i, fragment; 8132 uint16_t *wcode; 8133 __be16 *fwcode; 8134 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 8135 struct fw_blob *blob; 8136 struct qla_hw_data *ha = vha->hw; 8137 struct req_que *req = ha->req_q_map[0]; 8138 8139 /* Load firmware blob. */ 8140 blob = qla2x00_request_firmware(vha); 8141 if (!blob) { 8142 ql_log(ql_log_info, vha, 0x0083, 8143 "Firmware image unavailable.\n"); 8144 ql_log(ql_log_info, vha, 0x0084, 8145 "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); 8146 return QLA_FUNCTION_FAILED; 8147 } 8148 8149 rval = QLA_SUCCESS; 8150 8151 wcode = (uint16_t *)req->ring; 8152 *srisc_addr = 0; 8153 fwcode = (__force __be16 *)blob->fw->data; 8154 fwclen = 0; 8155 8156 /* Validate firmware image by checking version. 
*/ 8157 if (blob->fw->size < 8 * sizeof(uint16_t)) { 8158 ql_log(ql_log_fatal, vha, 0x0085, 8159 "Unable to verify integrity of firmware image (%zd).\n", 8160 blob->fw->size); 8161 goto fail_fw_integrity; 8162 } 8163 for (i = 0; i < 4; i++) 8164 wcode[i] = be16_to_cpu(fwcode[i + 4]); 8165 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 8166 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 8167 wcode[2] == 0 && wcode[3] == 0)) { 8168 ql_log(ql_log_fatal, vha, 0x0086, 8169 "Unable to verify integrity of firmware image.\n"); 8170 ql_log(ql_log_fatal, vha, 0x0087, 8171 "Firmware data: %04x %04x %04x %04x.\n", 8172 wcode[0], wcode[1], wcode[2], wcode[3]); 8173 goto fail_fw_integrity; 8174 } 8175 8176 seg = blob->segs; 8177 while (*seg && rval == QLA_SUCCESS) { 8178 risc_addr = *seg; 8179 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; 8180 risc_size = be16_to_cpu(fwcode[3]); 8181 8182 /* Validate firmware image size. */ 8183 fwclen += risc_size * sizeof(uint16_t); 8184 if (blob->fw->size < fwclen) { 8185 ql_log(ql_log_fatal, vha, 0x0088, 8186 "Unable to verify integrity of firmware image " 8187 "(%zd).\n", blob->fw->size); 8188 goto fail_fw_integrity; 8189 } 8190 8191 fragment = 0; 8192 while (risc_size > 0 && rval == QLA_SUCCESS) { 8193 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 8194 if (wlen > risc_size) 8195 wlen = risc_size; 8196 ql_dbg(ql_dbg_init, vha, 0x0089, 8197 "Loading risc segment@ risc addr %x number of " 8198 "words 0x%x.\n", risc_addr, wlen); 8199 8200 for (i = 0; i < wlen; i++) 8201 wcode[i] = swab16((__force u32)fwcode[i]); 8202 8203 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 8204 wlen); 8205 if (rval) { 8206 ql_log(ql_log_fatal, vha, 0x008a, 8207 "Failed to load segment %d of firmware.\n", 8208 fragment); 8209 break; 8210 } 8211 8212 fwcode += wlen; 8213 risc_addr += wlen; 8214 risc_size -= wlen; 8215 fragment++; 8216 } 8217 8218 /* Next segment. 
*/ 8219 seg++; 8220 } 8221 return rval; 8222 8223 fail_fw_integrity: 8224 return QLA_FUNCTION_FAILED; 8225 } 8226 8227 static int 8228 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) 8229 { 8230 int rval; 8231 uint templates, segments, fragment; 8232 uint32_t *dcode; 8233 ulong dlen; 8234 uint32_t risc_addr, risc_size, risc_attr = 0; 8235 ulong i; 8236 uint j; 8237 struct fw_blob *blob; 8238 __be32 *fwcode; 8239 struct qla_hw_data *ha = vha->hw; 8240 struct req_que *req = ha->req_q_map[0]; 8241 struct fwdt *fwdt = ha->fwdt; 8242 8243 ql_dbg(ql_dbg_init, vha, 0x0090, 8244 "-> FW: Loading via request-firmware.\n"); 8245 8246 blob = qla2x00_request_firmware(vha); 8247 if (!blob) { 8248 ql_log(ql_log_warn, vha, 0x0092, 8249 "-> Firmware file not found.\n"); 8250 8251 return QLA_FUNCTION_FAILED; 8252 } 8253 8254 fwcode = (__force __be32 *)blob->fw->data; 8255 dcode = (__force uint32_t *)fwcode; 8256 if (qla24xx_risc_firmware_invalid(dcode)) { 8257 ql_log(ql_log_fatal, vha, 0x0093, 8258 "Unable to verify integrity of firmware image (%zd).\n", 8259 blob->fw->size); 8260 ql_log(ql_log_fatal, vha, 0x0095, 8261 "Firmware data: %08x %08x %08x %08x.\n", 8262 dcode[0], dcode[1], dcode[2], dcode[3]); 8263 return QLA_FUNCTION_FAILED; 8264 } 8265 8266 dcode = (uint32_t *)req->ring; 8267 *srisc_addr = 0; 8268 segments = FA_RISC_CODE_SEGMENTS; 8269 for (j = 0; j < segments; j++) { 8270 ql_dbg(ql_dbg_init, vha, 0x0096, 8271 "-> Loading segment %u...\n", j); 8272 risc_addr = be32_to_cpu(fwcode[2]); 8273 risc_size = be32_to_cpu(fwcode[3]); 8274 8275 if (!*srisc_addr) { 8276 *srisc_addr = risc_addr; 8277 risc_attr = be32_to_cpu(fwcode[9]); 8278 } 8279 8280 dlen = ha->fw_transfer_size >> 2; 8281 for (fragment = 0; risc_size; fragment++) { 8282 if (dlen > risc_size) 8283 dlen = risc_size; 8284 8285 ql_dbg(ql_dbg_init, vha, 0x0097, 8286 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n", 8287 fragment, risc_addr, 8288 (uint32_t)(fwcode - 
(typeof(fwcode))blob->fw->data), 8289 dlen); 8290 8291 for (i = 0; i < dlen; i++) 8292 dcode[i] = swab32((__force u32)fwcode[i]); 8293 8294 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); 8295 if (rval) { 8296 ql_log(ql_log_fatal, vha, 0x0098, 8297 "-> Failed load firmware fragment %u.\n", 8298 fragment); 8299 return QLA_FUNCTION_FAILED; 8300 } 8301 8302 fwcode += dlen; 8303 risc_addr += dlen; 8304 risc_size -= dlen; 8305 } 8306 } 8307 8308 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 8309 return QLA_SUCCESS; 8310 8311 templates = (risc_attr & BIT_9) ? 2 : 1; 8312 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates); 8313 for (j = 0; j < templates; j++, fwdt++) { 8314 vfree(fwdt->template); 8315 fwdt->template = NULL; 8316 fwdt->length = 0; 8317 8318 risc_size = be32_to_cpu(fwcode[2]); 8319 ql_dbg(ql_dbg_init, vha, 0x0171, 8320 "-> fwdt%u template array at %#x (%#x dwords)\n", 8321 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data), 8322 risc_size); 8323 if (!risc_size || !~risc_size) { 8324 ql_dbg(ql_dbg_init, vha, 0x0172, 8325 "-> fwdt%u failed to read array\n", j); 8326 goto failed; 8327 } 8328 8329 /* skip header and ignore checksum */ 8330 fwcode += 7; 8331 risc_size -= 8; 8332 8333 ql_dbg(ql_dbg_init, vha, 0x0173, 8334 "-> fwdt%u template allocate template %#x words...\n", 8335 j, risc_size); 8336 fwdt->template = vmalloc(risc_size * sizeof(*dcode)); 8337 if (!fwdt->template) { 8338 ql_log(ql_log_warn, vha, 0x0174, 8339 "-> fwdt%u failed allocate template.\n", j); 8340 goto failed; 8341 } 8342 8343 dcode = fwdt->template; 8344 for (i = 0; i < risc_size; i++) 8345 dcode[i] = (__force u32)fwcode[i]; 8346 8347 if (!qla27xx_fwdt_template_valid(dcode)) { 8348 ql_log(ql_log_warn, vha, 0x0175, 8349 "-> fwdt%u failed template validate\n", j); 8350 goto failed; 8351 } 8352 8353 dlen = qla27xx_fwdt_template_size(dcode); 8354 ql_dbg(ql_dbg_init, vha, 0x0176, 8355 "-> fwdt%u template size %#lx bytes (%#lx words)\n", 8356 j, dlen, dlen / 
sizeof(*dcode)); 8357 if (dlen > risc_size * sizeof(*dcode)) { 8358 ql_log(ql_log_warn, vha, 0x0177, 8359 "-> fwdt%u template exceeds array (%-lu bytes)\n", 8360 j, dlen - risc_size * sizeof(*dcode)); 8361 goto failed; 8362 } 8363 8364 fwdt->length = dlen; 8365 ql_dbg(ql_dbg_init, vha, 0x0178, 8366 "-> fwdt%u loaded template ok\n", j); 8367 8368 fwcode += risc_size + 1; 8369 } 8370 8371 return QLA_SUCCESS; 8372 8373 failed: 8374 vfree(fwdt->template); 8375 fwdt->template = NULL; 8376 fwdt->length = 0; 8377 8378 return QLA_SUCCESS; 8379 } 8380 8381 int 8382 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 8383 { 8384 int rval; 8385 8386 if (ql2xfwloadbin == 1) 8387 return qla81xx_load_risc(vha, srisc_addr); 8388 8389 /* 8390 * FW Load priority: 8391 * 1) Firmware via request-firmware interface (.bin file). 8392 * 2) Firmware residing in flash. 8393 */ 8394 rval = qla24xx_load_risc_blob(vha, srisc_addr); 8395 if (rval == QLA_SUCCESS) 8396 return rval; 8397 8398 return qla24xx_load_risc_flash(vha, srisc_addr, 8399 vha->hw->flt_region_fw); 8400 } 8401 8402 int 8403 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 8404 { 8405 int rval; 8406 struct qla_hw_data *ha = vha->hw; 8407 struct active_regions active_regions = { }; 8408 8409 if (ql2xfwloadbin == 2) 8410 goto try_blob_fw; 8411 8412 /* FW Load priority: 8413 * 1) Firmware residing in flash. 8414 * 2) Firmware via request-firmware interface (.bin file). 8415 * 3) Golden-Firmware residing in flash -- (limited operation). 
8416 */ 8417 8418 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 8419 goto try_primary_fw; 8420 8421 qla27xx_get_active_image(vha, &active_regions); 8422 8423 if (active_regions.global != QLA27XX_SECONDARY_IMAGE) 8424 goto try_primary_fw; 8425 8426 ql_dbg(ql_dbg_init, vha, 0x008b, 8427 "Loading secondary firmware image.\n"); 8428 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); 8429 if (!rval) 8430 return rval; 8431 8432 try_primary_fw: 8433 ql_dbg(ql_dbg_init, vha, 0x008b, 8434 "Loading primary firmware image.\n"); 8435 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); 8436 if (!rval) 8437 return rval; 8438 8439 try_blob_fw: 8440 rval = qla24xx_load_risc_blob(vha, srisc_addr); 8441 if (!rval || !ha->flt_region_gold_fw) 8442 return rval; 8443 8444 ql_log(ql_log_info, vha, 0x0099, 8445 "Attempting to fallback to golden firmware.\n"); 8446 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 8447 if (rval) 8448 return rval; 8449 8450 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n"); 8451 ha->flags.running_gold_fw = 1; 8452 return rval; 8453 } 8454 8455 void 8456 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 8457 { 8458 int ret, retries; 8459 struct qla_hw_data *ha = vha->hw; 8460 8461 if (ha->flags.pci_channel_io_perm_failure) 8462 return; 8463 if (!IS_FWI2_CAPABLE(ha)) 8464 return; 8465 if (!ha->fw_major_version) 8466 return; 8467 if (!ha->flags.fw_started) 8468 return; 8469 8470 ret = qla2x00_stop_firmware(vha); 8471 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 8472 ret != QLA_INVALID_COMMAND && retries ; retries--) { 8473 ha->isp_ops->reset_chip(vha); 8474 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 8475 continue; 8476 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 8477 continue; 8478 ql_log(ql_log_info, vha, 0x8015, 8479 "Attempting retry of stop-firmware command.\n"); 8480 ret = qla2x00_stop_firmware(vha); 8481 } 8482 8483 QLA_FW_STOPPED(ha); 8484 
ha->flags.fw_init_done = 0; 8485 } 8486 8487 int 8488 qla24xx_configure_vhba(scsi_qla_host_t *vha) 8489 { 8490 int rval = QLA_SUCCESS; 8491 int rval2; 8492 uint16_t mb[MAILBOX_REGISTER_COUNT]; 8493 struct qla_hw_data *ha = vha->hw; 8494 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 8495 8496 if (!vha->vp_idx) 8497 return -EINVAL; 8498 8499 rval = qla2x00_fw_ready(base_vha); 8500 8501 if (rval == QLA_SUCCESS) { 8502 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 8503 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); 8504 } 8505 8506 vha->flags.management_server_logged_in = 0; 8507 8508 /* Login to SNS first */ 8509 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, 8510 BIT_1); 8511 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 8512 if (rval2 == QLA_MEMORY_ALLOC_FAILED) 8513 ql_dbg(ql_dbg_init, vha, 0x0120, 8514 "Failed SNS login: loop_id=%x, rval2=%d\n", 8515 NPH_SNS, rval2); 8516 else 8517 ql_dbg(ql_dbg_init, vha, 0x0103, 8518 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 8519 "mb[2]=%x mb[6]=%x mb[7]=%x.\n", 8520 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); 8521 return (QLA_FUNCTION_FAILED); 8522 } 8523 8524 atomic_set(&vha->loop_down_timer, 0); 8525 atomic_set(&vha->loop_state, LOOP_UP); 8526 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 8527 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 8528 rval = qla2x00_loop_resync(base_vha); 8529 8530 return rval; 8531 } 8532 8533 /* 84XX Support **************************************************************/ 8534 8535 static LIST_HEAD(qla_cs84xx_list); 8536 static DEFINE_MUTEX(qla_cs84xx_mutex); 8537 8538 static struct qla_chip_state_84xx * 8539 qla84xx_get_chip(struct scsi_qla_host *vha) 8540 { 8541 struct qla_chip_state_84xx *cs84xx; 8542 struct qla_hw_data *ha = vha->hw; 8543 8544 mutex_lock(&qla_cs84xx_mutex); 8545 8546 /* Find any shared 84xx chip. 
*/ 8547 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { 8548 if (cs84xx->bus == ha->pdev->bus) { 8549 kref_get(&cs84xx->kref); 8550 goto done; 8551 } 8552 } 8553 8554 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); 8555 if (!cs84xx) 8556 goto done; 8557 8558 kref_init(&cs84xx->kref); 8559 spin_lock_init(&cs84xx->access_lock); 8560 mutex_init(&cs84xx->fw_update_mutex); 8561 cs84xx->bus = ha->pdev->bus; 8562 8563 list_add_tail(&cs84xx->list, &qla_cs84xx_list); 8564 done: 8565 mutex_unlock(&qla_cs84xx_mutex); 8566 return cs84xx; 8567 } 8568 8569 static void 8570 __qla84xx_chip_release(struct kref *kref) 8571 { 8572 struct qla_chip_state_84xx *cs84xx = 8573 container_of(kref, struct qla_chip_state_84xx, kref); 8574 8575 mutex_lock(&qla_cs84xx_mutex); 8576 list_del(&cs84xx->list); 8577 mutex_unlock(&qla_cs84xx_mutex); 8578 kfree(cs84xx); 8579 } 8580 8581 void 8582 qla84xx_put_chip(struct scsi_qla_host *vha) 8583 { 8584 struct qla_hw_data *ha = vha->hw; 8585 8586 if (ha->cs84xx) 8587 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 8588 } 8589 8590 static int 8591 qla84xx_init_chip(scsi_qla_host_t *vha) 8592 { 8593 int rval; 8594 uint16_t status[2]; 8595 struct qla_hw_data *ha = vha->hw; 8596 8597 mutex_lock(&ha->cs84xx->fw_update_mutex); 8598 8599 rval = qla84xx_verify_chip(vha, status); 8600 8601 mutex_unlock(&ha->cs84xx->fw_update_mutex); 8602 8603 return rval != QLA_SUCCESS || status[0] ? 
QLA_FUNCTION_FAILED : 8604 QLA_SUCCESS; 8605 } 8606 8607 /* 81XX Support **************************************************************/ 8608 8609 int 8610 qla81xx_nvram_config(scsi_qla_host_t *vha) 8611 { 8612 int rval; 8613 struct init_cb_81xx *icb; 8614 struct nvram_81xx *nv; 8615 __le32 *dptr; 8616 uint8_t *dptr1, *dptr2; 8617 uint32_t chksum; 8618 uint16_t cnt; 8619 struct qla_hw_data *ha = vha->hw; 8620 uint32_t faddr; 8621 struct active_regions active_regions = { }; 8622 8623 rval = QLA_SUCCESS; 8624 icb = (struct init_cb_81xx *)ha->init_cb; 8625 nv = ha->nvram; 8626 8627 /* Determine NVRAM starting address. */ 8628 ha->nvram_size = sizeof(*nv); 8629 ha->vpd_size = FA_NVRAM_VPD_SIZE; 8630 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 8631 ha->vpd_size = FA_VPD_SIZE_82XX; 8632 8633 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) 8634 qla28xx_get_aux_images(vha, &active_regions); 8635 8636 /* Get VPD data into cache */ 8637 ha->vpd = ha->nvram + VPD_OFFSET; 8638 8639 faddr = ha->flt_region_vpd; 8640 if (IS_QLA28XX(ha)) { 8641 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 8642 faddr = ha->flt_region_vpd_sec; 8643 ql_dbg(ql_dbg_init, vha, 0x0110, 8644 "Loading %s nvram image.\n", 8645 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 8646 "primary" : "secondary"); 8647 } 8648 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); 8649 8650 /* Get NVRAM data into cache and calculate checksum. */ 8651 faddr = ha->flt_region_nvram; 8652 if (IS_QLA28XX(ha)) { 8653 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) 8654 faddr = ha->flt_region_nvram_sec; 8655 } 8656 ql_dbg(ql_dbg_init, vha, 0x0110, 8657 "Loading %s nvram image.\n", 8658 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 
8659 "primary" : "secondary"); 8660 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); 8661 8662 dptr = (__force __le32 *)nv; 8663 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 8664 chksum += le32_to_cpu(*dptr); 8665 8666 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, 8667 "Contents of NVRAM:\n"); 8668 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, 8669 nv, ha->nvram_size); 8670 8671 /* Bad NVRAM data, set defaults parameters. */ 8672 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || 8673 le16_to_cpu(nv->nvram_version) < ICB_VERSION) { 8674 /* Reset NVRAM data. */ 8675 ql_log(ql_log_info, vha, 0x0073, 8676 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", 8677 chksum, nv->id, le16_to_cpu(nv->nvram_version)); 8678 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); 8679 ql_log(ql_log_info, vha, 0x0074, 8680 "Falling back to functioning (yet invalid -- WWPN) " 8681 "defaults.\n"); 8682 8683 /* 8684 * Set default initialization control block. 
8685 */ 8686 memset(nv, 0, ha->nvram_size); 8687 nv->nvram_version = cpu_to_le16(ICB_VERSION); 8688 nv->version = cpu_to_le16(ICB_VERSION); 8689 nv->frame_payload_size = cpu_to_le16(2048); 8690 nv->execution_throttle = cpu_to_le16(0xFFFF); 8691 nv->exchange_count = cpu_to_le16(0); 8692 nv->port_name[0] = 0x21; 8693 nv->port_name[1] = 0x00 + ha->port_no + 1; 8694 nv->port_name[2] = 0x00; 8695 nv->port_name[3] = 0xe0; 8696 nv->port_name[4] = 0x8b; 8697 nv->port_name[5] = 0x1c; 8698 nv->port_name[6] = 0x55; 8699 nv->port_name[7] = 0x86; 8700 nv->node_name[0] = 0x20; 8701 nv->node_name[1] = 0x00; 8702 nv->node_name[2] = 0x00; 8703 nv->node_name[3] = 0xe0; 8704 nv->node_name[4] = 0x8b; 8705 nv->node_name[5] = 0x1c; 8706 nv->node_name[6] = 0x55; 8707 nv->node_name[7] = 0x86; 8708 nv->login_retry_count = cpu_to_le16(8); 8709 nv->interrupt_delay_timer = cpu_to_le16(0); 8710 nv->login_timeout = cpu_to_le16(0); 8711 nv->firmware_options_1 = 8712 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 8713 nv->firmware_options_2 = cpu_to_le32(2 << 4); 8714 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 8715 nv->firmware_options_3 = cpu_to_le32(2 << 13); 8716 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 8717 nv->efi_parameters = cpu_to_le32(0); 8718 nv->reset_delay = 5; 8719 nv->max_luns_per_target = cpu_to_le16(128); 8720 nv->port_down_retry_count = cpu_to_le16(30); 8721 nv->link_down_timeout = cpu_to_le16(180); 8722 nv->enode_mac[0] = 0x00; 8723 nv->enode_mac[1] = 0xC0; 8724 nv->enode_mac[2] = 0xDD; 8725 nv->enode_mac[3] = 0x04; 8726 nv->enode_mac[4] = 0x05; 8727 nv->enode_mac[5] = 0x06 + ha->port_no + 1; 8728 8729 rval = 1; 8730 } 8731 8732 if (IS_T10_PI_CAPABLE(ha)) 8733 nv->frame_payload_size &= cpu_to_le16(~7); 8734 8735 qlt_81xx_config_nvram_stage1(vha, nv); 8736 8737 /* Reset Initialization control block */ 8738 memset(icb, 0, ha->init_cb_size); 8739 8740 /* Copy 1st segment. 
*/ 8741 dptr1 = (uint8_t *)icb; 8742 dptr2 = (uint8_t *)&nv->version; 8743 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 8744 while (cnt--) 8745 *dptr1++ = *dptr2++; 8746 8747 icb->login_retry_count = nv->login_retry_count; 8748 8749 /* Copy 2nd segment. */ 8750 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 8751 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 8752 cnt = (uint8_t *)&icb->reserved_5 - 8753 (uint8_t *)&icb->interrupt_delay_timer; 8754 while (cnt--) 8755 *dptr1++ = *dptr2++; 8756 8757 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 8758 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 8759 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 8760 icb->enode_mac[0] = 0x00; 8761 icb->enode_mac[1] = 0xC0; 8762 icb->enode_mac[2] = 0xDD; 8763 icb->enode_mac[3] = 0x04; 8764 icb->enode_mac[4] = 0x05; 8765 icb->enode_mac[5] = 0x06 + ha->port_no + 1; 8766 } 8767 8768 /* Use extended-initialization control block. */ 8769 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 8770 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); 8771 /* 8772 * Setup driver NVRAM options. 8773 */ 8774 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 8775 "QLE8XXX"); 8776 8777 qlt_81xx_config_nvram_stage2(vha, icb); 8778 8779 /* Use alternate WWN? */ 8780 if (nv->host_p & cpu_to_le32(BIT_15)) { 8781 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 8782 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 8783 } 8784 8785 /* Prepare nodename */ 8786 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 8787 /* 8788 * Firmware will apply the following mask if the nodename was 8789 * not provided. 
8790 */ 8791 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 8792 icb->node_name[0] &= 0xF0; 8793 } 8794 8795 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { 8796 if ((nv->enhanced_features & BIT_7) == 0) 8797 ha->flags.scm_supported_a = 1; 8798 } 8799 8800 /* Set host adapter parameters. */ 8801 ha->flags.disable_risc_code_load = 0; 8802 ha->flags.enable_lip_reset = 0; 8803 ha->flags.enable_lip_full_login = 8804 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; 8805 ha->flags.enable_target_reset = 8806 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; 8807 ha->flags.enable_led_scheme = 0; 8808 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; 8809 8810 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 8811 (BIT_6 | BIT_5 | BIT_4)) >> 4; 8812 8813 /* save HBA serial number */ 8814 ha->serial0 = icb->port_name[5]; 8815 ha->serial1 = icb->port_name[6]; 8816 ha->serial2 = icb->port_name[7]; 8817 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 8818 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 8819 8820 icb->execution_throttle = cpu_to_le16(0xFFFF); 8821 8822 ha->retry_count = le16_to_cpu(nv->login_retry_count); 8823 8824 /* Set minimum login_timeout to 4 seconds. */ 8825 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 8826 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 8827 if (le16_to_cpu(nv->login_timeout) < 4) 8828 nv->login_timeout = cpu_to_le16(4); 8829 ha->login_timeout = le16_to_cpu(nv->login_timeout); 8830 8831 /* Set minimum RATOV to 100 tenths of a second. */ 8832 ha->r_a_tov = 100; 8833 8834 ha->loop_reset_delay = nv->reset_delay; 8835 8836 /* Link Down Timeout = 0: 8837 * 8838 * When Port Down timer expires we will start returning 8839 * I/O's to OS with "DID_NO_CONNECT". 8840 * 8841 * Link Down Timeout != 0: 8842 * 8843 * The driver waits for the link to come up after link down 8844 * before returning I/Os to OS with "DID_NO_CONNECT". 
8845 */ 8846 if (le16_to_cpu(nv->link_down_timeout) == 0) { 8847 ha->loop_down_abort_time = 8848 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 8849 } else { 8850 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 8851 ha->loop_down_abort_time = 8852 (LOOP_DOWN_TIME - ha->link_down_timeout); 8853 } 8854 8855 /* Need enough time to try and get the port back. */ 8856 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 8857 if (qlport_down_retry) 8858 ha->port_down_retry_count = qlport_down_retry; 8859 8860 /* Set login_retry_count */ 8861 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 8862 if (ha->port_down_retry_count == 8863 le16_to_cpu(nv->port_down_retry_count) && 8864 ha->port_down_retry_count > 3) 8865 ha->login_retry_count = ha->port_down_retry_count; 8866 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 8867 ha->login_retry_count = ha->port_down_retry_count; 8868 if (ql2xloginretrycount) 8869 ha->login_retry_count = ql2xloginretrycount; 8870 8871 /* if not running MSI-X we need handshaking on interrupts */ 8872 if (!vha->hw->flags.msix_enabled && 8873 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) 8874 icb->firmware_options_2 |= cpu_to_le32(BIT_22); 8875 8876 /* Enable ZIO. */ 8877 if (!vha->flags.init_done) { 8878 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 8879 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 8880 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
		le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	/*
	 * Clear the ZIO mode bits (firmware_options_2[3:0]); they are
	 * re-applied below only when ZIO stays enabled.  Any non-disabled
	 * NVRAM setting is promoted to QLA_ZIO_MODE_6.
	 */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla82xx_restart_isp
 *	Restart the ISP82xx after a reset: re-initialize the rings, wait
 *	for firmware readiness, re-enable interrupts and trace buffers,
 *	then run an ISP abort on every virtual port.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Returns:
 *	0 on success, non-zero otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/*
		 * Re-arm FCE/EFT tracing if the buffers exist.  Failures
		 * here are diagnostic-only (rval is logged, not returned).
		 */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/*
				 * Take a vref so the vport stays valid
				 * while vport_slock is dropped around
				 * qla2x00_vp_abort_isp().
				 */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}

/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	-1 (if not found)
 *
 * Context:
 *	 Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return -1;

	priority = -1;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	for (i = 0; i < entries; i++) {
		/*
		 * An entry matches when BOTH its source and destination
		 * descriptors match (pid_match == 2 or wwn_match == 2).
		 */
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			/* an all-ones PID in the entry acts as a wildcard */
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			/* an all-ones WWN in the entry acts as a wildcard */
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}

/*
 * qla24xx_update_fcport_fcp_prio
 *	Activates fcp priority for the logged in fc port
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcp = port structure pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int ret;
	int priority;
	uint16_t mb[5];

	/* Priority applies only to logged-in target ports. */
	if (fcport->port_type != FCT_TARGET ||
	    fcport->loop_id == FC_NO_LOOP_ID)
		return QLA_FUNCTION_FAILED;

	priority = qla24xx_get_fcp_prio(vha, fcport);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;

	/* P3P (82xx) parts: cache the priority, no mailbox command. */
	if (IS_P3P_TYPE(vha->hw)) {
		fcport->fcp_prio = priority & 0xf;
		return QLA_SUCCESS;
	}

	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
	if (ret == QLA_SUCCESS) {
		if (fcport->fcp_prio != priority)
			ql_dbg(ql_dbg_user, vha, 0x709e,
			    "Updated FCP_CMND priority - value=%d loop_id=%d "
			    "port_id=%02x%02x%02x.\n", priority,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		/* only the low nibble of the tag is cached */
		fcport->fcp_prio = priority & 0xf;
	} else
		ql_dbg(ql_dbg_user, vha, 0x704f,
		    "Unable to update FCP_CMND priority - ret=0x%x for "
		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	return ret;
}

/*
 * qla24xx_update_all_fcp_prio
 *	Activates fcp priority for all the logged in ports
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
{
	int ret;
	fc_port_t *fcport;

	ret = QLA_FUNCTION_FAILED;
	/* We need to set priority for all logged in ports */
	/*
	 * NOTE(review): ret is overwritten each iteration, so the return
	 * value reflects only the LAST port visited (or FAILED when the
	 * list is empty) — confirm callers do not rely on aggregate status.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);

	return ret;
}

/*
 * qla2xxx_create_qpair
 *	Allocate and bring up one request/response queue pair for
 *	multi-queue operation: reserve a qpair id, bind an unused MSI-X
 *	vector, create the response queue then the request queue, and
 *	create the per-qpair srb mempool.
 *
 * Input:
 *	vha	= scsi host structure pointer.
 *	qos	= QoS value passed through to request-queue creation.
 *	vp_idx	= virtual port index the qpair belongs to.
 *	startqp	= whether to start the queues immediately.
 *
 * Return:
 *	Pointer to the new qpair, or NULL on failure.
 *
 * Context:
 *	Kernel context (sleeps: GFP_KERNEL allocation, mutex).
 */
struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
	int vp_idx, bool startqp)
{
	int rsp_id = 0;
	int req_id = 0;
	int i;
	struct qla_hw_data *ha = vha->hw;
	uint16_t qpair_id = 0;
	struct qla_qpair *qpair = NULL;
	struct qla_msix_entry *msix;

	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
		ql_log(ql_log_warn, vha, 0x00181,
		    "FW/Driver is not multi-queue capable.\n");
		return NULL;
	}

	if (ql2xmqsupport || ql2xnvmeenable) {
		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate memory for queue pair.\n");
			return NULL;
		}

		qpair->hw = vha->hw;
		qpair->vha = vha;
		qpair->qp_lock_ptr = &qpair->qp_lock;
		spin_lock_init(&qpair->qp_lock);
		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;

		/* Assign available que pair id */
		mutex_lock(&ha->mq_lock);
		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
		if (ha->num_qpairs >= ha->max_qpairs) {
			mutex_unlock(&ha->mq_lock);
			ql_log(ql_log_warn, vha, 0x0183,
			    "No resources to create additional q pair.\n");
			goto fail_qid_map;
		}
		ha->num_qpairs++;
		set_bit(qpair_id, ha->qpair_qid_map);
		ha->queue_pair_map[qpair_id] = qpair;
		qpair->id = qpair_id;
		qpair->vp_idx = vp_idx;
		qpair->fw_started = ha->flags.fw_started;
		INIT_LIST_HEAD(&qpair->hints_list);
		/* inherit adapter-wide settings from the base qpair */
		qpair->chip_reset = ha->base_qpair->chip_reset;
		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
		qpair->enable_explicit_conf =
		    ha->base_qpair->enable_explicit_conf;

		/* Bind the first MSI-X vector not already in use. */
		for (i = 0; i < ha->msix_count; i++) {
			msix = &ha->msix_entries[i];
			if (msix->in_use)
				continue;
			qpair->msix = msix;
			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
			    "Vector %x selected for qpair\n", msix->vector);
			break;
		}
		if (!qpair->msix) {
			ql_log(ql_log_warn, vha, 0x0184,
			    "Out of MSI-X vectors!.\n");
			goto fail_msix;
		}

		qpair->msix->in_use = 1;
		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
		qpair->pdev = ha->pdev;
		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;

		mutex_unlock(&ha->mq_lock);

		/* Create response queue first */
		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
		if (!rsp_id) {
			ql_log(ql_log_warn, vha, 0x0185,
			    "Failed to create response queue.\n");
			goto fail_rsp;
		}

		qpair->rsp = ha->rsp_q_map[rsp_id];

		/* Create request queue */
		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
		    startqp);
		if (!req_id) {
			ql_log(ql_log_warn, vha, 0x0186,
			    "Failed to create request queue.\n");
			goto fail_req;
		}

		qpair->req = ha->req_q_map[req_id];
		qpair->rsp->req = qpair->req;
		qpair->rsp->qpair = qpair;
		/* init qpair to this cpu. Will adjust at run time. */
		qla_cpu_update(qpair, smp_processor_id());

		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
			if (ha->fw_attributes & BIT_4)
				qpair->difdix_supported = 1;
		}

		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
		if (!qpair->srb_mempool) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to create srb mempool for qpair %d\n",
			    qpair->id);
			goto fail_mempool;
		}

		/* Mark as online */
		qpair->online = 1;

		if (!vha->flags.qpairs_available)
			vha->flags.qpairs_available = 1;

		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
		ql_dbg(ql_dbg_init, vha, 0x0187,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
	}
	return qpair;

	/*
	 * Error unwind, in reverse order of acquisition.
	 * NOTE(review): on the fail_mempool path the request queue created
	 * above is not deleted here — confirm it is reclaimed elsewhere
	 * (e.g. during ISP teardown).
	 */
fail_mempool:
fail_req:
	qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
	mutex_lock(&ha->mq_lock);
	qpair->msix->in_use = 0;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list))
		vha->flags.qpairs_available = 0;
fail_msix:
	ha->queue_pair_map[qpair_id] = NULL;
	clear_bit(qpair_id, ha->qpair_qid_map);
	ha->num_qpairs--;
	mutex_unlock(&ha->mq_lock);
fail_qid_map:
	kfree(qpair);
	return NULL;
}

/*
 * qla2xxx_delete_qpair
 *	Tear down a queue pair created by qla2xxx_create_qpair(): delete
 *	its request and response queues, release its qpair id and srb
 *	mempool, and free the structure.
 *
 * Return:
 *	QLA_SUCCESS, or the failing delete-queue status (qpair is NOT
 *	freed on that path).
 */
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
	int ret = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = qpair->hw;

	qpair->delete_in_progress = 1;

	ret = qla25xx_delete_req_que(vha, qpair->req);
	if (ret != QLA_SUCCESS)
		goto fail;

	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
	if (ret != QLA_SUCCESS)
		goto fail;

	mutex_lock(&ha->mq_lock);
	ha->queue_pair_map[qpair->id] = NULL;
	clear_bit(qpair->id, ha->qpair_qid_map);
	ha->num_qpairs--;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list)) {
		vha->flags.qpairs_available = 0;
		vha->flags.qpairs_req_created = 0;
		vha->flags.qpairs_rsp_created = 0;
	}
	mempool_destroy(qpair->srb_mempool);
	kfree(qpair);
	mutex_unlock(&ha->mq_lock);

	return QLA_SUCCESS;
fail:
	return ret;
}

/*
 * qla2x00_count_set_bits
 *	Return the number of set bits in a 32-bit value.
 */
uint64_t
qla2x00_count_set_bits(uint32_t num)
{
	/* Brian Kernighan's Algorithm */
	u64 count = 0;

	while (num) {
		num &= (num - 1);	/* clear the lowest set bit */
		count++;
	}
	return count;
}

/*
 * qla2x00_get_num_tgts
 *	Count the fcports of this host whose port_type is FCT_TARGET.
 */
uint64_t
qla2x00_get_num_tgts(scsi_qla_host_t *vha)
{
	fc_port_t *f, *tf;
	u64 count = 0;

	f = NULL;
	tf = NULL;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->port_type != FCT_TARGET)
			continue;
		count++;
	}
	return count;
}

/*
 * qla2xxx_reset_stats
 *	Clear the per-host error/statistics counters selected by the
 *	QLA2XX_* bits in 'flags'; QLA2XX_TGT_SHT_LNK_DOWN also clears the
 *	per-target short-link-down counters under the session lock.
 *
 * Return:
 *	0 always.
 */
int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	if (flags & QLA2XX_HW_ERROR)
		vha->hw_err_cnt = 0;
	if (flags & QLA2XX_SHT_LNK_DWN)
		vha->short_link_down_cnt = 0;
	if (flags & QLA2XX_INT_ERR)
		vha->interface_err_cnt = 0;
	if (flags & QLA2XX_CMD_TIMEOUT)
		vha->cmd_timeout_cnt = 0;
	if (flags & QLA2XX_RESET_CMD_ERR)
		vha->reset_cmd_err_cnt = 0;
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->tgt_short_link_down_cnt = 0;
			fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	/* reset unconditionally, independent of 'flags' */
	vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	return 0;
}

/* Start statistics collection: implemented as a counter reset. */
int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}

/* Stop statistics collection: implemented as a counter reset. */
int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}

/*
 * qla2xxx_get_ini_stats
 *	Fill 'data' (a struct ql_vnd_host_stats_resp) with one entry per
 *	statistic selected in 'flags', followed by one entry per target
 *	port when QLA2XX_TGT_SHT_LNK_DOWN (BIT_17) is set.
 *
 *	NOTE(review): 'size' is unused; assumes the caller sized 'data'
 *	for entry_count entries — TODO confirm against callers.
 *
 * Return:
 *	0 always; resp->status is set to EXT_STATUS_OK.
 */
int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
			  void *data, u64 size)
{
	scsi_qla_host_t *vha = shost_priv(host);
	struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
	struct ql_vnd_stats *rsp_data = &resp->stats;
	u64 ini_entry_count = 0;
	u64 i = 0;
	u64 entry_count = 0;
	u64 num_tgt = 0;
	u32 tmp_stat_type = 0;
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	/* Copy stat type to work on it */
	tmp_stat_type = flags;

	/* BIT_17 selects per-target entries rather than a host counter. */
	if (tmp_stat_type & BIT_17) {
		num_tgt = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}
	/* one initiator entry per remaining selected bit */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	entry_count = ini_entry_count + num_tgt;

	rsp_data->entry_count = entry_count;

	i = 0;
	if (flags & QLA2XX_HW_ERROR) {
		rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->hw_err_cnt;
		i++;
	}

	if (flags & QLA2XX_SHT_LNK_DWN) {
		rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->short_link_down_cnt;
		i++;
	}

	if (flags & QLA2XX_INT_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->interface_err_cnt;
		i++;
	}

	if (flags & QLA2XX_CMD_TIMEOUT) {
		rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
		i++;
	}

	if (flags & QLA2XX_RESET_CMD_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
		i++;
	}

	/* i will continue from previous loop, as target
	 * entries are after initiator
	 */
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;
			if (!fcport->rport)
				continue;
			rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
			rsp_data->entry[i].tgt_num = fcport->rport->number;
			rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
			i++;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	resp->status = EXT_STATUS_OK;

	return 0;
}

/*
 * qla2xxx_get_tgt_stats
 *	Fill 'data' (a struct ql_vnd_tgt_stats_resp) with a single entry
 *	holding the short-link-down count of the given remote port.
 *
 *	NOTE(review): fcport from rport->dd_data is dereferenced without
 *	a NULL check — confirm callers guarantee a bound session.
 *
 * Return:
 *	0 always.
 */
int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
			  struct fc_rport *rport, void *data, u64 size)
{
	struct ql_vnd_tgt_stats_resp *tgt_data = data;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	tgt_data->status = 0;
	tgt_data->stats.entry_count = 1;
	tgt_data->stats.entry[0].stat_type = flags;
	tgt_data->stats.entry[0].tgt_num = rport->number;
	tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;

	return 0;
}

/*
 * qla2xxx_disable_port
 *	Isolate the port: mark it isolated and, if the chip is up and the
 *	host online, run abort-ISP cleanup and wait for session deletion.
 *
 * Return:
 *	0 always.
 */
int qla2xxx_disable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	vha->hw->flags.port_isolated = 1;

	/* chip already down: nothing further to tear down */
	if (qla2x00_chip_is_down(vha))
		return 0;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);
		qla2x00_wait_for_sess_deletion(vha);
	}

	return 0;
}

/*
 * qla2xxx_enable_port
 *	Undo port isolation and schedule an ISP abort via the DPC thread
 *	to bring the port back up.
 *
 * Return:
 *	0 always.
 */
int qla2xxx_enable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	vha->hw->flags.port_isolated = 0;
	/* Set the flag to 1, so that isp_abort can proceed */
	vha->flags.online = 1;
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	return 0;
}