/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * qla2x00_sp_timeout - per-SRB timer expiry callback.
 *
 * Removes the SRB from the request queue's outstanding-command table under
 * hardware_lock, then invokes the SRB's type-specific timeout handler
 * (e.g. qla2x00_async_iocb_timeout) outside the lock.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	struct req_que *req;
	unsigned long flags;
	struct qla_hw_data *ha = sp->vha->hw;

	/* Timer callbacks are expected to run with IRQs enabled. */
	WARN_ON_ONCE(irqs_disabled());
	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = sp->qpair->req;
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* Dispatch to the type-specific handler set at SRB init time. */
	iocb->timeout(sp);
}

/*
 * qla2x00_sp_free - stop the SRB's timer and return it to its pool.
 */
void
qla2x00_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * qla2x00_get_async_timeout - compute the timeout (in seconds) used for
 * asynchronous login/logout style IOCBs.
 *
 * Normally 2 * R_A_TOV (r_a_tov is stored in tenths of a second); ISPFx00
 * uses a fixed default, and pre-FWI2 ISPs fall back to the login timeout
 * seeded from the initialization control block.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * qla2x00_async_iocb_timeout - timeout handler for async login/logout/CT/
 * mailbox SRBs.
 *
 * Attempts a firmware abort of the command; if the abort cannot be issued,
 * manually removes the SRB from the qpair's outstanding-command table and
 * completes it with QLA_FUNCTION_TIMEOUT so the done callback can run.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Retry as needed. */
			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
			lio->u.logio.data[1] =
				lio->u.logio.flags & SRB_LOGIN_RETRIED ?
				QLA_LOGIO_LOGIN_RETRIED : 0;
			/*
			 * Abort could not be posted: pull the SRB out of the
			 * outstanding table ourselves before completing it.
			 * Handle 0 is reserved, hence h starts at 1.
			 */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
					    NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Same manual cleanup as the login case above. */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
					    NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	}
}

/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI SRB.
 *
 * Forwards the login IOCB results to the fcport event handler as a
 * FCME_PLOGI_DONE event (unless the driver is unloading) and frees the SRB.
 */
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * fcport_is_smaller - true when the remote port's WWPN is numerically
 * smaller than the local adapter's WWPN (used for N2N role arbitration).
 */
static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
	    wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

/* Complement of fcport_is_smaller; also used for N2N role arbitration. */
static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}

/*
 * qla2x00_async_login - issue an asynchronous PLOGI (or PRLI-only for N2N)
 * to @fcport.
 *
 * Returns QLA_SUCCESS when the SRB was started; otherwise marks the port
 * for relogin and returns the failure code.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generation counters so completion can detect staleness. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		/* N2N with bigger remote WWPN: remote initiates PLOGI. */
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

		/* NVMe ports get a separate PRLI after PLOGI completes. */
		if (fcport->fc4f_nvme)
			lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/*
 * qla2x00_async_logout_sp_done - completion callback for an async LOGO SRB.
 * Bumps login_gen and notifies the target-mode logo completion handler.
 */
static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, res);
	sp->free(sp);
}

/*
 * qla2x00_async_logout - issue an asynchronous LOGO to @fcport.
 * No-op (failure return) if offline or an async op is already in flight.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}

/*
 * qla2x00_async_prlo_done - deferred-work handler run after an async PRLO
 * completes; marks the device lost (initiator mode only) and notifies the
 * target-mode logo completion handler with the PRLO status.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
}

/*
 * qla2x00_async_prlo_sp_done - completion callback for an async PRLO SRB;
 * defers the real handling to qla2x00_async_prlo_done via the work queue.
 */
static void
qla2x00_async_prlo_sp_done(void *s, int res)
{
	srb_t *sp = (srb_t *)s;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

/*
 * qla2x00_async_prlo - issue an asynchronous PRLO (process logout) to
 * @fcport.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prlo_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

/*
 * qla24xx_handle_adisc_event - process FCME_ADISC_DONE.
 *
 * On ADISC failure the session is scheduled for deletion (with a forced
 * firmware cleanup); on success, stale generation counters cause either a
 * bail-out (login gen changed) or an RSCN replay + session deletion
 * (rscn gen changed), otherwise the common GPDB-done path finalizes login.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

/*
 * qla_post_els_plogi_work - queue a QLA_EVT_ELS_PLOGI work item for
 * @fcport on the vha's work list.
 */
int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_adisc_sp_done - completion callback for an async ADISC SRB;
 * forwards results to the fcport event handler as FCME_ADISC_DONE.
 */
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_ADISC_DONE;
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}

/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to @fcport.
 * On failure the ADISC-done work is still posted so discovery can proceed.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generation counters so completion can detect staleness. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

493 sp->done = qla2x00_async_adisc_sp_done; 494 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 495 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 496 497 ql_dbg(ql_dbg_disc, vha, 0x206f, 498 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", 499 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name); 500 501 rval = qla2x00_start_sp(sp); 502 if (rval != QLA_SUCCESS) 503 goto done_free_sp; 504 505 return rval; 506 507 done_free_sp: 508 sp->free(sp); 509 done: 510 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 511 qla2x00_post_async_adisc_work(vha, fcport, data); 512 return rval; 513 } 514 515 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, 516 struct event_arg *ea) 517 { 518 fc_port_t *fcport, *conflict_fcport; 519 struct get_name_list_extended *e; 520 u16 i, n, found = 0, loop_id; 521 port_id_t id; 522 u64 wwn; 523 u16 data[2]; 524 u8 current_login_state; 525 526 fcport = ea->fcport; 527 ql_dbg(ql_dbg_disc, vha, 0xffff, 528 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n", 529 __func__, fcport->port_name, fcport->disc_state, 530 fcport->fw_login_state, ea->rc, 531 fcport->login_gen, fcport->last_login_gen, 532 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id); 533 534 if (fcport->disc_state == DSC_DELETE_PEND) 535 return; 536 537 if (ea->rc) { /* rval */ 538 if (fcport->login_retry == 0) { 539 ql_dbg(ql_dbg_disc, vha, 0x20de, 540 "GNL failed Port login retry %8phN, retry cnt=%d.\n", 541 fcport->port_name, fcport->login_retry); 542 } 543 return; 544 } 545 546 if (fcport->last_rscn_gen != fcport->rscn_gen) { 547 qla_rscn_replay(fcport); 548 qlt_schedule_sess_for_deletion(fcport); 549 return; 550 } else if (fcport->last_login_gen != fcport->login_gen) { 551 ql_dbg(ql_dbg_disc, vha, 0x20e0, 552 "%s %8phC login gen changed\n", 553 __func__, fcport->port_name); 554 return; 555 } 556 557 n = ea->data[0] / sizeof(struct get_name_list_extended); 558 559 ql_dbg(ql_dbg_disc, vha, 0x20e1, 560 "%s %d %8phC n %d %02x%02x%02x lid %d \n", 
561 __func__, __LINE__, fcport->port_name, n, 562 fcport->d_id.b.domain, fcport->d_id.b.area, 563 fcport->d_id.b.al_pa, fcport->loop_id); 564 565 for (i = 0; i < n; i++) { 566 e = &vha->gnl.l[i]; 567 wwn = wwn_to_u64(e->port_name); 568 id.b.domain = e->port_id[2]; 569 id.b.area = e->port_id[1]; 570 id.b.al_pa = e->port_id[0]; 571 id.b.rsvd_1 = 0; 572 573 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) 574 continue; 575 576 if (IS_SW_RESV_ADDR(id)) 577 continue; 578 579 found = 1; 580 581 loop_id = le16_to_cpu(e->nport_handle); 582 loop_id = (loop_id & 0x7fff); 583 if (fcport->fc4f_nvme) 584 current_login_state = e->current_login_state >> 4; 585 else 586 current_login_state = e->current_login_state & 0xf; 587 588 589 ql_dbg(ql_dbg_disc, vha, 0x20e2, 590 "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", 591 __func__, fcport->port_name, 592 e->current_login_state, fcport->fw_login_state, 593 fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa, 594 fcport->d_id.b.domain, fcport->d_id.b.area, 595 fcport->d_id.b.al_pa, loop_id, fcport->loop_id); 596 597 switch (fcport->disc_state) { 598 case DSC_DELETE_PEND: 599 case DSC_DELETED: 600 break; 601 default: 602 if ((id.b24 != fcport->d_id.b24 && 603 fcport->d_id.b24) || 604 (fcport->loop_id != FC_NO_LOOP_ID && 605 fcport->loop_id != loop_id)) { 606 ql_dbg(ql_dbg_disc, vha, 0x20e3, 607 "%s %d %8phC post del sess\n", 608 __func__, __LINE__, fcport->port_name); 609 qlt_schedule_sess_for_deletion(fcport); 610 return; 611 } 612 break; 613 } 614 615 fcport->loop_id = loop_id; 616 617 wwn = wwn_to_u64(fcport->port_name); 618 qlt_find_sess_invalidate_other(vha, wwn, 619 id, loop_id, &conflict_fcport); 620 621 if (conflict_fcport) { 622 /* 623 * Another share fcport share the same loop_id & 624 * nport id. Conflict fcport needs to finish 625 * cleanup before this fcport can proceed to login. 
626 */ 627 conflict_fcport->conflict = fcport; 628 fcport->login_pause = 1; 629 } 630 631 switch (vha->hw->current_topology) { 632 default: 633 switch (current_login_state) { 634 case DSC_LS_PRLI_COMP: 635 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 636 vha, 0x20e4, "%s %d %8phC post gpdb\n", 637 __func__, __LINE__, fcport->port_name); 638 639 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 640 fcport->port_type = FCT_INITIATOR; 641 else 642 fcport->port_type = FCT_TARGET; 643 data[0] = data[1] = 0; 644 qla2x00_post_async_adisc_work(vha, fcport, 645 data); 646 break; 647 case DSC_LS_PORT_UNAVAIL: 648 default: 649 if (fcport->loop_id != FC_NO_LOOP_ID) 650 qla2x00_clear_loop_id(fcport); 651 652 fcport->loop_id = loop_id; 653 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 654 qla24xx_fcport_handle_login(vha, fcport); 655 break; 656 } 657 break; 658 case ISP_CFG_N: 659 fcport->fw_login_state = current_login_state; 660 fcport->d_id = id; 661 switch (current_login_state) { 662 case DSC_LS_PRLI_COMP: 663 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 664 fcport->port_type = FCT_INITIATOR; 665 else 666 fcport->port_type = FCT_TARGET; 667 668 data[0] = data[1] = 0; 669 qla2x00_post_async_adisc_work(vha, fcport, 670 data); 671 break; 672 case DSC_LS_PLOGI_COMP: 673 if (fcport_is_bigger(fcport)) { 674 /* local adapter is smaller */ 675 if (fcport->loop_id != FC_NO_LOOP_ID) 676 qla2x00_clear_loop_id(fcport); 677 678 fcport->loop_id = loop_id; 679 qla24xx_fcport_handle_login(vha, 680 fcport); 681 break; 682 } 683 /* drop through */ 684 default: 685 if (fcport_is_smaller(fcport)) { 686 /* local adapter is bigger */ 687 if (fcport->loop_id != FC_NO_LOOP_ID) 688 qla2x00_clear_loop_id(fcport); 689 690 fcport->loop_id = loop_id; 691 qla24xx_fcport_handle_login(vha, 692 fcport); 693 } 694 break; 695 } 696 break; 697 } /* switch (ha->current_topology) */ 698 } 699 700 if (!found) { 701 switch (vha->hw->current_topology) { 702 case ISP_CFG_F: 703 case ISP_CFG_FL: 704 for (i = 0; i < n; i++) { 
705 e = &vha->gnl.l[i]; 706 id.b.domain = e->port_id[0]; 707 id.b.area = e->port_id[1]; 708 id.b.al_pa = e->port_id[2]; 709 id.b.rsvd_1 = 0; 710 loop_id = le16_to_cpu(e->nport_handle); 711 712 if (fcport->d_id.b24 == id.b24) { 713 conflict_fcport = 714 qla2x00_find_fcport_by_wwpn(vha, 715 e->port_name, 0); 716 if (conflict_fcport) { 717 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 718 vha, 0x20e5, 719 "%s %d %8phC post del sess\n", 720 __func__, __LINE__, 721 conflict_fcport->port_name); 722 qlt_schedule_sess_for_deletion 723 (conflict_fcport); 724 } 725 } 726 /* 727 * FW already picked this loop id for 728 * another fcport 729 */ 730 if (fcport->loop_id == loop_id) 731 fcport->loop_id = FC_NO_LOOP_ID; 732 } 733 qla24xx_fcport_handle_login(vha, fcport); 734 break; 735 case ISP_CFG_N: 736 fcport->disc_state = DSC_DELETED; 737 if (time_after_eq(jiffies, fcport->dm_login_expire)) { 738 if (fcport->n2n_link_reset_cnt < 2) { 739 fcport->n2n_link_reset_cnt++; 740 /* 741 * remote port is not sending PLOGI. 742 * Reset link to kick start his state 743 * machine 744 */ 745 set_bit(N2N_LINK_RESET, 746 &vha->dpc_flags); 747 } else { 748 if (fcport->n2n_chip_reset < 1) { 749 ql_log(ql_log_info, vha, 0x705d, 750 "Chip reset to bring laser down"); 751 set_bit(ISP_ABORT_NEEDED, 752 &vha->dpc_flags); 753 fcport->n2n_chip_reset++; 754 } else { 755 ql_log(ql_log_info, vha, 0x705d, 756 "Remote port %8ph is not coming back\n", 757 fcport->port_name); 758 fcport->scan_state = 0; 759 } 760 } 761 qla2xxx_wake_dpc(vha); 762 } else { 763 /* 764 * report port suppose to do PLOGI. Give him 765 * more time. FW will catch it. 
766 */ 767 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 768 } 769 break; 770 default: 771 break; 772 } 773 } 774 } /* gnl_event */ 775 776 static void 777 qla24xx_async_gnl_sp_done(void *s, int res) 778 { 779 struct srb *sp = s; 780 struct scsi_qla_host *vha = sp->vha; 781 unsigned long flags; 782 struct fc_port *fcport = NULL, *tf; 783 u16 i, n = 0, loop_id; 784 struct event_arg ea; 785 struct get_name_list_extended *e; 786 u64 wwn; 787 struct list_head h; 788 bool found = false; 789 790 ql_dbg(ql_dbg_disc, vha, 0x20e7, 791 "Async done-%s res %x mb[1]=%x mb[2]=%x \n", 792 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], 793 sp->u.iocb_cmd.u.mbx.in_mb[2]); 794 795 if (res == QLA_FUNCTION_TIMEOUT) 796 return; 797 798 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 799 memset(&ea, 0, sizeof(ea)); 800 ea.sp = sp; 801 ea.rc = res; 802 ea.event = FCME_GNL_DONE; 803 804 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= 805 sizeof(struct get_name_list_extended)) { 806 n = sp->u.iocb_cmd.u.mbx.in_mb[1] / 807 sizeof(struct get_name_list_extended); 808 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ 809 } 810 811 for (i = 0; i < n; i++) { 812 e = &vha->gnl.l[i]; 813 loop_id = le16_to_cpu(e->nport_handle); 814 /* mask out reserve bit */ 815 loop_id = (loop_id & 0x7fff); 816 set_bit(loop_id, vha->hw->loop_id_map); 817 wwn = wwn_to_u64(e->port_name); 818 819 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, 820 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", 821 __func__, (void *)&wwn, e->port_id[2], e->port_id[1], 822 e->port_id[0], e->current_login_state, e->last_login_state, 823 (loop_id & 0x7fff)); 824 } 825 826 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 827 828 INIT_LIST_HEAD(&h); 829 fcport = tf = NULL; 830 if (!list_empty(&vha->gnl.fcports)) 831 list_splice_init(&vha->gnl.fcports, &h); 832 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 833 834 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { 835 list_del_init(&fcport->gnl_entry); 836 
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	/* Allow the next GNL to be issued. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

/*
 * qla24xx_async_gnl - issue a Get Port/Node Name List mailbox command.
 *
 * Multiple fcports may piggyback on one in-flight GNL: the port is queued
 * on vha->gnl.fcports and, if a GNL is already outstanding (gnl.sent), the
 * function returns QLA_SUCCESS without issuing another command.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; this port rides along. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* MBC_PORT_NODE_NAME_LIST with the gnl DMA buffer as destination. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/* Queue a QLA_EVT_GNL work item for @fcport. */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb_sp_done - completion callback for a Get Port Database
 * mailbox SRB; forwards results as FCME_GPDB_DONE and releases the DMA'd
 * port-database buffer.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	if (res == QLA_FUNCTION_TIMEOUT) {
		/*
		 * NOTE(review): only the pd buffer is freed here; sp itself
		 * is not - presumably the abort/timeout path retains
		 * ownership of the SRB. Confirm against the abort handling.
		 */
		dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
		    sp->u.iocb_cmd.u.mbx.in_dma);
		return;
	}

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/* Queue a QLA_EVT_PRLI work item for @fcport. */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_prli_sp_done - completion callback for an async PRLI SRB;
 * forwards results to the fcport event handler as FCME_PRLI_DONE.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla24xx_async_prli - issue an asynchronous PRLI to @fcport.
 * Skipped while a firmware PLOGI/PRLI for the port is already pending.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/* Queue a QLA_EVT_GPDB work item for @fcport with options @opt. */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb - issue an asynchronous Get Port Database mailbox
 * command for @fcport; the response lands in a dma_pool buffer handed to
 * qla24xx_async_gpdb_sp_done.  On failure the GPDB work is re-posted.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 =
fcport->login_gen; 1135 1136 mbx = &sp->u.iocb_cmd; 1137 mbx->timeout = qla2x00_async_iocb_timeout; 1138 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 1139 1140 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1141 if (pd == NULL) { 1142 ql_log(ql_log_warn, vha, 0xd043, 1143 "Failed to allocate port database structure.\n"); 1144 goto done_free_sp; 1145 } 1146 1147 mb = sp->u.iocb_cmd.u.mbx.out_mb; 1148 mb[0] = MBC_GET_PORT_DATABASE; 1149 mb[1] = fcport->loop_id; 1150 mb[2] = MSW(pd_dma); 1151 mb[3] = LSW(pd_dma); 1152 mb[6] = MSW(MSD(pd_dma)); 1153 mb[7] = LSW(MSD(pd_dma)); 1154 mb[9] = vha->vp_idx; 1155 mb[10] = opt; 1156 1157 mbx->u.mbx.in = (void *)pd; 1158 mbx->u.mbx.in_dma = pd_dma; 1159 1160 sp->done = qla24xx_async_gpdb_sp_done; 1161 1162 ql_dbg(ql_dbg_disc, vha, 0x20dc, 1163 "Async-%s %8phC hndl %x opt %x\n", 1164 sp->name, fcport->port_name, sp->handle, opt); 1165 1166 rval = qla2x00_start_sp(sp); 1167 if (rval != QLA_SUCCESS) 1168 goto done_free_sp; 1169 return rval; 1170 1171 done_free_sp: 1172 if (pd) 1173 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1174 1175 sp->free(sp); 1176 fcport->flags &= ~FCF_ASYNC_SENT; 1177 done: 1178 qla24xx_post_gpdb_work(vha, fcport, opt); 1179 return rval; 1180 } 1181 1182 static 1183 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) 1184 { 1185 unsigned long flags; 1186 1187 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1188 ea->fcport->login_gen++; 1189 ea->fcport->deleted = 0; 1190 ea->fcport->logout_on_delete = 1; 1191 1192 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { 1193 vha->fcport_count++; 1194 ea->fcport->login_succ = 1; 1195 1196 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1197 qla24xx_sched_upd_fcport(ea->fcport); 1198 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1199 } else if (ea->fcport->login_succ) { 1200 /* 1201 * We have an existing session. 
 A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

/*
 * Completion handler for an async GPDB command: check the firmware's
 * reported login state for the port and advance the discovery state
 * machine (parse the port database, relogin, or tear the session down).
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
	    ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* NVMe login state is reported in the high nibble, FCP in the low. */
	if (fcport->fc4f_nvme)
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it.
 */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN raced with this GPDB - replay it and re-discover. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			fcport->disc_state = DSC_GNL;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

/*
 * For N2N / dual-mode topologies decide whether this adapter should
 * originate the PLOGI: the side with the larger WWPN logs in first,
 * unless the peer already completed PLOGI and its NACK window expired.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	/* Pure target mode never initiates login. */
	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
				    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login && fcport->login_retry) {
		fcport->login_retry--;
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d 
%8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

/*
 * Central per-port discovery state machine step.  Based on the port's
 * disc_state and the current topology, post the next async discovery
 * command (GNNID/GNL/GPDB/PRLI/ADISC/login).  Returns 0 in all cases;
 * progress is reported through posted work and dpc_flags.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Firmware already has a login in flight for this handle. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* NOTE(review): 0x6 appears to be the PRLI-complete
			 * login state nibble - confirm against firmware spec. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post NVMe PRLI\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Warn once a minute while midlayer registration is slow. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}

/*
 * Handle an RSCN aimed at a known port: bump the RSCN generation and,
 * when the port is idle (deleted or fully logged in), kick off a GPNID
 * query to revalidate it.
 */
static
void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
	fcport->rscn_gen++;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
	    "%s %8phC DS %d LS %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state);

	if (fcport->flags & FCF_ASYNC_SENT)
		return;

	switch (fcport->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_COMPLETE:
		qla24xx_post_gpnid_work(fcport->vha, &ea->id);
		break;
	default:
		break;
	}
}

/*
 * Queue creation of a new session from DPC context.  @pla is an opaque
 * pending-login structure (owned by the target code); @node_name may be
 * NULL when unknown.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void
 *pla, u8 fc4_type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/*
 * Relogin event: bail out if firmware still has a login in flight or an
 * RSCN invalidated our view; otherwise run the discovery state machine.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	/* A newer RSCN superseded this relogin - drop it. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}


/* ELS PLOGI completed - continue the login sequence with a PRLI. */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
{
	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}

/*
 * Top-level fcport event dispatcher: routes FCME_* events (RSCN, async
 * command completions, relogin requests) to their handlers.
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *f, *tf;
	uint32_t id = 0, mask, rid;
	fc_port_t *fcport;

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		/* rsvd_1 carries the RSCN address format (port/area/domain). */
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
#define BIGSCAN 1
#if defined BIGSCAN & BIGSCAN > 0
		{
			/* Batch RSCNs: mark the port and (re)arm a single
			 * delayed fabric scan instead of per-port GPNIDs. */
			unsigned long flags;
			fcport = qla2x00_find_fcport_by_nportid
				(vha, &ea->id, 1);
			if (fcport) {
				fcport->scan_needed = 1;
				fcport->rscn_gen++;
			}

			spin_lock_irqsave(&vha->work_lock, flags);
			if (vha->scan.scan_flags == 0) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s: schedule\n", __func__);
				vha->scan.scan_flags |= SF_QUEUED;
				schedule_delayed_work(&vha->scan.scan_work, 5);
			}
			spin_unlock_irqrestore(&vha->work_lock, flags);
		}
#else
		{
			int rc;
			fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
			if (!fcport) {
				/* cable moved */
				rc = qla24xx_post_gpnid_work(vha, &ea->id);
				if (rc) {
					ql_log(ql_log_warn, vha, 0xd044,
					    "RSCN GPNID work failed %06x\n",
					    ea->id.b24);
				}
			} else {
				ea->fcport = fcport;
				fcport->scan_needed = 1;
				qla24xx_handle_rscn_event(fcport, ea);
			}
		}
#endif
			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			/* Replay the RSCN against every port in the range. */
			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_handle_gpsc_event(vha, ea);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_ADISC_DONE:
		qla24xx_handle_adisc_event(vha, ea);
		break;
	case FCME_GNNID_DONE:
		qla24xx_handle_gnnid_event(vha, ea);
		break;
	case FCME_GFPNID_DONE:
		qla24xx_handle_gfpnid_event(vha, ea);
		break;
	case FCME_ELS_PLOGI_DONE:
		qla_handle_els_plogi_done(vha, ea);
		break;
	default:
		BUG_ON(1);
		break;
	}
}

/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_RSCN;
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
#if defined BIGSCAN & BIGSCAN > 0
		qla2x00_fcport_event_handler(fcport->vha, &ea);
#else
		qla24xx_post_gpnid_work(fcport->vha, &ea.id);
#endif
	}
}

/* Timeout for an async Task Management IOCB: flag CS_TIMEOUT and wake
 * the waiter in qla2x00_async_tm_cmd(). */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
srb_t *sp = data; 1725 struct srb_iocb *tmf = &sp->u.iocb_cmd; 1726 1727 tmf->u.tmf.comp_status = CS_TIMEOUT; 1728 complete(&tmf->u.tmf.comp); 1729 } 1730 1731 static void 1732 qla2x00_tmf_sp_done(void *ptr, int res) 1733 { 1734 srb_t *sp = ptr; 1735 struct srb_iocb *tmf = &sp->u.iocb_cmd; 1736 1737 complete(&tmf->u.tmf.comp); 1738 } 1739 1740 int 1741 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, 1742 uint32_t tag) 1743 { 1744 struct scsi_qla_host *vha = fcport->vha; 1745 struct srb_iocb *tm_iocb; 1746 srb_t *sp; 1747 int rval = QLA_FUNCTION_FAILED; 1748 1749 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1750 if (!sp) 1751 goto done; 1752 1753 tm_iocb = &sp->u.iocb_cmd; 1754 sp->type = SRB_TM_CMD; 1755 sp->name = "tmf"; 1756 1757 tm_iocb->timeout = qla2x00_tmf_iocb_timeout; 1758 init_completion(&tm_iocb->u.tmf.comp); 1759 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); 1760 1761 tm_iocb->u.tmf.flags = flags; 1762 tm_iocb->u.tmf.lun = lun; 1763 tm_iocb->u.tmf.data = tag; 1764 sp->done = qla2x00_tmf_sp_done; 1765 1766 ql_dbg(ql_dbg_taskm, vha, 0x802f, 1767 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 1768 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 1769 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1770 1771 rval = qla2x00_start_sp(sp); 1772 if (rval != QLA_SUCCESS) 1773 goto done_free_sp; 1774 wait_for_completion(&tm_iocb->u.tmf.comp); 1775 1776 rval = tm_iocb->u.tmf.data; 1777 1778 if (rval != QLA_SUCCESS) { 1779 ql_log(ql_log_warn, vha, 0x8030, 1780 "TM IOCB failed (%x).\n", rval); 1781 } 1782 1783 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { 1784 flags = tm_iocb->u.tmf.flags; 1785 lun = (uint16_t)tm_iocb->u.tmf.lun; 1786 1787 /* Issue Marker IOCB */ 1788 qla2x00_marker(vha, vha->hw->req_q_map[0], 1789 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, 1790 flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); 1791 } 1792 1793 done_free_sp: 1794 sp->free(sp); 1795 sp->fcport->flags &= ~FCF_ASYNC_SENT; 1796 done: 1797 return rval; 1798 } 1799 1800 static void 1801 qla24xx_abort_iocb_timeout(void *data) 1802 { 1803 srb_t *sp = data; 1804 struct srb_iocb *abt = &sp->u.iocb_cmd; 1805 1806 abt->u.abt.comp_status = CS_TIMEOUT; 1807 sp->done(sp, QLA_FUNCTION_TIMEOUT); 1808 } 1809 1810 static void 1811 qla24xx_abort_sp_done(void *ptr, int res) 1812 { 1813 srb_t *sp = ptr; 1814 struct srb_iocb *abt = &sp->u.iocb_cmd; 1815 1816 if (del_timer(&sp->u.iocb_cmd.timer)) { 1817 if (sp->flags & SRB_WAKEUP_ON_COMP) 1818 complete(&abt->u.abt.comp); 1819 else 1820 sp->free(sp); 1821 } 1822 } 1823 1824 int 1825 qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) 1826 { 1827 scsi_qla_host_t *vha = cmd_sp->vha; 1828 struct srb_iocb *abt_iocb; 1829 srb_t *sp; 1830 int rval = QLA_FUNCTION_FAILED; 1831 1832 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, 1833 GFP_KERNEL); 1834 if (!sp) 1835 goto done; 1836 1837 abt_iocb = &sp->u.iocb_cmd; 1838 sp->type = SRB_ABT_CMD; 1839 sp->name = "abort"; 1840 sp->qpair = cmd_sp->qpair; 1841 if (wait) 1842 sp->flags = SRB_WAKEUP_ON_COMP; 1843 1844 abt_iocb->timeout = qla24xx_abort_iocb_timeout; 1845 init_completion(&abt_iocb->u.abt.comp); 1846 /* FW can send 2 x ABTS's timeout/20s */ 1847 qla2x00_init_timer(sp, 42); 1848 1849 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; 1850 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); 1851 1852 sp->done = qla24xx_abort_sp_done; 1853 1854 ql_dbg(ql_dbg_async, vha, 0x507c, 1855 "Abort command issued - hdl=%x, type=%x\n", 1856 cmd_sp->handle, cmd_sp->type); 1857 1858 rval = qla2x00_start_sp(sp); 1859 if (rval != QLA_SUCCESS) 1860 goto done_free_sp; 1861 1862 if (wait) { 1863 wait_for_completion(&abt_iocb->u.abt.comp); 1864 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
1865 QLA_SUCCESS : QLA_FUNCTION_FAILED; 1866 } else { 1867 goto done; 1868 } 1869 1870 done_free_sp: 1871 sp->free(sp); 1872 done: 1873 return rval; 1874 } 1875 1876 int 1877 qla24xx_async_abort_command(srb_t *sp) 1878 { 1879 unsigned long flags = 0; 1880 1881 uint32_t handle; 1882 fc_port_t *fcport = sp->fcport; 1883 struct qla_qpair *qpair = sp->qpair; 1884 struct scsi_qla_host *vha = fcport->vha; 1885 struct req_que *req = qpair->req; 1886 1887 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1888 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1889 if (req->outstanding_cmds[handle] == sp) 1890 break; 1891 } 1892 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1893 1894 if (handle == req->num_outstanding_cmds) { 1895 /* Command not found. */ 1896 return QLA_FUNCTION_FAILED; 1897 } 1898 if (sp->type == SRB_FXIOCB_DCMD) 1899 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, 1900 FXDISC_ABORT_IOCTL); 1901 1902 return qla24xx_async_abort_cmd(sp, true); 1903 } 1904 1905 static void 1906 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 1907 { 1908 switch (ea->data[0]) { 1909 case MBS_COMMAND_COMPLETE: 1910 ql_dbg(ql_dbg_disc, vha, 0x2118, 1911 "%s %d %8phC post gpdb\n", 1912 __func__, __LINE__, ea->fcport->port_name); 1913 1914 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 1915 ea->fcport->logout_on_delete = 1; 1916 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 1917 break; 1918 default: 1919 if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) && 1920 (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */ 1921 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1922 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 1923 break; 1924 } 1925 1926 if (ea->fcport->n2n_flag) { 1927 ql_dbg(ql_dbg_disc, vha, 0x2118, 1928 "%s %d %8phC post fc4 prli\n", 1929 __func__, __LINE__, ea->fcport->port_name); 1930 ea->fcport->fc4f_nvme = 0; 1931 ea->fcport->n2n_flag = 0; 1932 qla24xx_post_prli_work(vha, ea->fcport); 1933 } 1934 ql_dbg(ql_dbg_disc, vha, 
0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}

/*
 * PLOGI completion handler.  Validates generation counters against
 * racing RSCNs/logins, then handles the mailbox status: success (go to
 * PRLI or GPDB), command error (mark login failed / device lost), or
 * loop-id / port-id conflicts (resolve and re-run discovery).
 */
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
				"%s %d %8phC post prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Pick a fresh loop id; re-run the node list query. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/*
 * Async LOGO completion: notify the target code, bump the login
 * generation and clear the async-active flag.
 */
void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	qlt_logo_completion_handler(fcport, data[0]);
	fcport->login_gen++;
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.
 */
/****************************************************************************/

/*
 * 83xx FCoE: join the Inter-Driver Communication (IDC) protocol, verify
 * IDC major-version compatibility, publish our minor version and - if we
 * own the reset - load/ready the NIC core firmware.  Runs under the IDC
 * lock for the whole sequence.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version.
 */
	/* Two bits per PCI function in the minor-version register. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
* qla2x00_initialize_adapter
*      Initialize board.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags.
 */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings?
 */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity/SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 (QLA_SUCCESS) only when RISC-code load is disabled AND the
 * already-resident firmware passes its checksum and the adapter answers
 * Get Adapter ID; any non-zero return means "load RISC code".
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code.
 */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Pauses the RISC, soft-resets the FPM and frame-buffer modules, soft-resets
 * the ISP, then re-enables PCI bus mastering.  Function is void; the old
 * "Returns 0 on success" note was stale.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to insert a delay here since the card
		 * doesn't respond to PCI reads during a reset. On some
		 * architectures this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
 * @vha: HA context
 *
 * Returns 0 on success; a no-op (QLA_SUCCESS) on non-81xx parts.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns 0 on success, QLA_FUNCTION_TIMEOUT if mailbox 0 never clears.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC.
 */
	/* Shut down DMA before the soft reset. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the soft-reset write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/*
			 * NOTE(review): abts_cnt is function-static, so the
			 * retry count is shared by every adapter this driver
			 * manages — presumably acceptable for this rare
			 * failure path; confirm if multi-adapter MPI resets
			 * can interleave.
			 */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for RISC firmware to come up after reset release. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}

/* Read the RISC semaphore register through the I/O-base window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);

}

/* Write the RISC semaphore register through the I/O-base window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}

/*
 * Acquire (or force) the RISC semaphore before reset on specific
 * subsystem IDs (0x0175/0x0240 boards only); no-op for everything else.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Someone force-held it; clear the force bit and retry. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Disables interrupts, grabs the RISC semaphore where required, and
 * performs a full RISC reset.  Function is void; the old "Returns 0 on
 * success" note was stale.
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	/*
	 * NOTE(review): on the udelay(10) path cnt still holds the value
	 * left over from the earlier reset-wait loop (nonzero when we got
	 * here), so this check only trips for the 2100/2200/2300 loop above.
	 */
	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

/*
 * Allocate the FCE (Fibre Channel Event) and EFT (Extended Firmware
 * Trace) DMA buffers and enable the corresponding firmware traces.
 * Best-effort: failures are logged and the feature is skipped.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: Offload Mem is already allocated.\n",
		    __func__);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		/* Allocate memory for Fibre Channel Event Buffer.
 */
		/* FCE is only supported on 25xx/81xx/83xx/27xx parts. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/*
		 * Allocate memory for Fibre Channel Event Buffer.
		 * NOTE(review): dma_zalloc_coherent() is deprecated upstream
		 * in favor of dma_alloc_coherent(), which now zeroes.
		 */
		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto eft_err;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto eft_err;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}

eft_err:
	return;
}

/*
 * Size and (re)allocate the firmware-dump buffer for the adapter type,
 * then seed its header fields.  Kept if the previously allocated length
 * already matches.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += ha->max_req_queues *
			    (req->length * sizeof(request_t));
			mq_size += ha->max_rsp_queues *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
		/* Account for the Fibre Channel Event Buffer chain. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
try_eft:
		/*
		 * NOTE(review): this debug text is misleading — nothing is
		 * allocated here; this path only accounts EFT_SIZE in the
		 * dump sizing.
		 */
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
		eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha)) {
		if (!ha->fw_dump_template) {
			ql_log(ql_log_warn, vha, 0x00ba,
			    "Failed missing fwdump template\n");
			return;
		}
		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
		ql_dbg(ql_dbg_init, vha, 0x00fa,
		    "-> allocating fwdump (%x bytes)...\n", dump_size);
		goto allocate;
	}

	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);
	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	if (ha->exchoffld_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exchoffld_size;
	if (ha->exlogin_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exlogin_size;

allocate:
	if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			if (ha->fw_dump)
				vfree(ha->fw_dump);
			ha->fw_dump = fw_dump;

			ha->fw_dump_len = dump_size;
			ql_dbg(ql_dbg_init, vha, 0x00c5,
			    "Allocated (%d KB) for firmware dump.\n",
			    dump_size / 1024);

			if (IS_QLA27XX(ha))
				return;

			/* Header fields are stored big-endian. */
			ha->fw_dump->signature[0] = 'Q';
			ha->fw_dump->signature[1] = 'L';
			ha->fw_dump->signature[2] = 'G';
			ha->fw_dump->signature[3] = 'C';
			ha->fw_dump->version = htonl(1);

			ha->fw_dump->fixed_size = htonl(fixed_size);
			ha->fw_dump->mem_size = htonl(mem_size);
			ha->fw_dump->req_q_size = htonl(req_q_size);
			ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

			ha->fw_dump->eft_size = htonl(eft_size);
			ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
			ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

			ha->fw_dump->header_size =
				htonl(offsetof(struct qla2xxx_fw_dump, isp));
		}
	}
}

/*
 * Synchronize the MPS field in firmware RAM (0x7a15) with the value in
 * PCI config space, under a firmware semaphore (0x7c00).  81xx only.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
3332 } 3333 3334 int 3335 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) 3336 { 3337 /* Don't try to reallocate the array */ 3338 if (req->outstanding_cmds) 3339 return QLA_SUCCESS; 3340 3341 if (!IS_FWI2_CAPABLE(ha)) 3342 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; 3343 else { 3344 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 3345 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 3346 else 3347 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 3348 } 3349 3350 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3351 sizeof(srb_t *), 3352 GFP_KERNEL); 3353 3354 if (!req->outstanding_cmds) { 3355 /* 3356 * Try to allocate a minimal size just so we can get through 3357 * initialization. 3358 */ 3359 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 3360 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, 3361 sizeof(srb_t *), 3362 GFP_KERNEL); 3363 3364 if (!req->outstanding_cmds) { 3365 ql_log(ql_log_fatal, NULL, 0x0126, 3366 "Failed to allocate memory for " 3367 "outstanding_cmds for req_que %p.\n", req); 3368 req->num_outstanding_cmds = 0; 3369 return QLA_FUNCTION_FAILED; 3370 } 3371 } 3372 3373 return QLA_SUCCESS; 3374 } 3375 3376 #define PRINT_FIELD(_field, _flag, _str) { \ 3377 if (a0->_field & _flag) {\ 3378 if (p) {\ 3379 strcat(ptr, "|");\ 3380 ptr++;\ 3381 leftover--;\ 3382 } \ 3383 len = snprintf(ptr, leftover, "%s", _str); \ 3384 p = 1;\ 3385 leftover -= len;\ 3386 ptr += len; \ 3387 } \ 3388 } 3389 3390 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 3391 { 3392 #define STR_LEN 64 3393 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 3394 u8 str[STR_LEN], *ptr, p; 3395 int leftover, len; 3396 3397 memset(str, 0, STR_LEN); 3398 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 3399 ql_dbg(ql_dbg_init, vha, 0x015a, 3400 "SFP MFG Name: %s\n", str); 3401 3402 memset(str, 0, STR_LEN); 3403 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 3404 
ql_dbg(ql_dbg_init, vha, 0x015c, 3405 "SFP Part Name: %s\n", str); 3406 3407 /* media */ 3408 memset(str, 0, STR_LEN); 3409 ptr = str; 3410 leftover = STR_LEN; 3411 p = len = 0; 3412 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 3413 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 3414 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 3415 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 3416 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 3417 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 3418 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 3419 ql_dbg(ql_dbg_init, vha, 0x0160, 3420 "SFP Media: %s\n", str); 3421 3422 /* link length */ 3423 memset(str, 0, STR_LEN); 3424 ptr = str; 3425 leftover = STR_LEN; 3426 p = len = 0; 3427 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 3428 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 3429 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 3430 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 3431 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 3432 ql_dbg(ql_dbg_init, vha, 0x0196, 3433 "SFP Link Length: %s\n", str); 3434 3435 memset(str, 0, STR_LEN); 3436 ptr = str; 3437 leftover = STR_LEN; 3438 p = len = 0; 3439 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 3440 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 3441 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 3442 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 3443 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 3444 ql_dbg(ql_dbg_init, vha, 0x016e, 3445 "SFP FC Link Tech: %s\n", str); 3446 3447 if (a0->length_km) 3448 ql_dbg(ql_dbg_init, vha, 0x016f, 3449 "SFP Distant: %d km\n", a0->length_km); 3450 if (a0->length_100m) 3451 ql_dbg(ql_dbg_init, vha, 0x0170, 3452 "SFP Distant: %d m\n", a0->length_100m*100); 3453 if (a0->length_50um_10m) 3454 ql_dbg(ql_dbg_init, vha, 0x0189, 3455 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 3456 if (a0->length_62um_10m) 3457 ql_dbg(ql_dbg_init, vha, 0x018a, 3458 "SFP Distant (WL=62.5um): %d 
m\n", a0->length_62um_10m * 10);
	if (a0->length_om4_10m)
		ql_dbg(ql_dbg_init, vha, 0x0194,
		    "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
	if (a0->length_om3_10m)
		ql_dbg(ql_dbg_init, vha, 0x0195,
		    "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
}


/*
 * Read the SFP ID page and classify the transceiver as long or short
 * range, caching the result in ha->flags.detected_lr_sfp and
 * ha->long_range_distance.
 *
 * Return Code:
 *   QLA_SUCCESS: no action
 *   QLA_INTERFACE_ERROR: SFP is not there.
 *   QLA_FUNCTION_FAILED: detected New SFP
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc = QLA_SUCCESS;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;

	if (!AUTO_DETECT_SFP_SUPPORT(vha))
		goto out;

	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
		/* long range */
		ha->flags.detected_lr_sfp = 1;

		/* Pick the distance bucket from the cable length fields. */
		if (a->length_km > 5 || a->length_100m > 50)
			ha->long_range_distance = LR_DISTANCE_10K;
		else
			ha->long_range_distance = LR_DISTANCE_5K;

		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x507b,
			    "Detected Long Range SFP.\n");
	} else {
		/* short range */
		ha->flags.detected_lr_sfp = 0;
		if (ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x5084,
			    "Detected Short Range SFP.\n");
	}

	/* A changed SFP during initial load is not reported as a failure. */
	if (!vha->flags.init_done)
		rc = QLA_SUCCESS;
out:
	return rc;
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);	/* PCI posting. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
				    (ha->zio_mode == QLA_ZIO_MODE_6))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				/* P3P parts jump straight here after their
				 * load_risc above; firmware details are
				 * queried below for all paths.
				 */
enable_82xx_npiv:
				/* Snapshot the version from before the query;
				 * zero means this is the first start, which
				 * gates the offload-mem allocation below.
				 */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);	/* PCI posting. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Sector size is reported in words; keep bytes. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Mark every entry processed so stale entries are never consumed. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	/* Refresh the cached firmware option words from the firmware. */
	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only push options to the firmware when something is set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block.
	 */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Zero the hardware queue in/out pointer registers. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));	/* PCI Posting. */
}

void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* out_ptr lives in the slot just past the last ring entry. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		/* Handle 0 is never handed out (clear loop starts at 1). */
		req->current_outstanding_cmd = 1;

		/* Initialize firmware.
		 */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* in_ptr lives in the slot just past the last ring entry. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *      Setup adapter context.
 *
 * Input:
 *      ha = adapter state pointer.
 *
 * Returns:
 *      0 = success
 *
 * Context:
 *      Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* Topology not resolved yet; tell the caller to retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* topo as reported by firmware: 0=NL, 1=FL, 2=N (P2P), 3=F. */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
    char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces and NULs from the model string. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *      ha                = adapter block pointer.
 *
 * Output:
 *      initialization control block in response_ring
 *      host adapters parameters in host adapter block
 *
 * Returns:
 *      0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum.
*/ 4432 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); 4433 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 4434 chksum += *ptr++; 4435 4436 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f, 4437 "Contents of NVRAM.\n"); 4438 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110, 4439 (uint8_t *)nv, ha->nvram_size); 4440 4441 /* Bad NVRAM data, set defaults parameters. */ 4442 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 4443 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 4444 /* Reset NVRAM data. */ 4445 ql_log(ql_log_warn, vha, 0x0064, 4446 "Inconsistent NVRAM " 4447 "detected: checksum=0x%x id=%c version=0x%x.\n", 4448 chksum, nv->id[0], nv->nvram_version); 4449 ql_log(ql_log_warn, vha, 0x0065, 4450 "Falling back to " 4451 "functioning (yet invalid -- WWPN) defaults.\n"); 4452 4453 /* 4454 * Set default initialization control block. 4455 */ 4456 memset(nv, 0, ha->nvram_size); 4457 nv->parameter_block_version = ICB_VERSION; 4458 4459 if (IS_QLA23XX(ha)) { 4460 nv->firmware_options[0] = BIT_2 | BIT_1; 4461 nv->firmware_options[1] = BIT_7 | BIT_5; 4462 nv->add_firmware_options[0] = BIT_5; 4463 nv->add_firmware_options[1] = BIT_5 | BIT_4; 4464 nv->frame_payload_size = 2048; 4465 nv->special_options[1] = BIT_7; 4466 } else if (IS_QLA2200(ha)) { 4467 nv->firmware_options[0] = BIT_2 | BIT_1; 4468 nv->firmware_options[1] = BIT_7 | BIT_5; 4469 nv->add_firmware_options[0] = BIT_5; 4470 nv->add_firmware_options[1] = BIT_5 | BIT_4; 4471 nv->frame_payload_size = 1024; 4472 } else if (IS_QLA2100(ha)) { 4473 nv->firmware_options[0] = BIT_3 | BIT_1; 4474 nv->firmware_options[1] = BIT_5; 4475 nv->frame_payload_size = 1024; 4476 } 4477 4478 nv->max_iocb_allocation = cpu_to_le16(256); 4479 nv->execution_throttle = cpu_to_le16(16); 4480 nv->retry_count = 8; 4481 nv->retry_delay = 1; 4482 4483 nv->port_name[0] = 33; 4484 nv->port_name[3] = 224; 4485 nv->port_name[4] = 139; 4486 4487 qla2xxx_nvram_wwn_from_ofw(vha, nv); 4488 4489 
nv->login_timeout = 4; 4490 4491 /* 4492 * Set default host adapter parameters 4493 */ 4494 nv->host_p[1] = BIT_2; 4495 nv->reset_delay = 5; 4496 nv->port_down_retry_count = 8; 4497 nv->max_luns_per_target = cpu_to_le16(8); 4498 nv->link_down_timeout = 60; 4499 4500 rval = 1; 4501 } 4502 4503 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 4504 /* 4505 * The SN2 does not provide BIOS emulation which means you can't change 4506 * potentially bogus BIOS settings. Force the use of default settings 4507 * for link rate and frame size. Hope that the rest of the settings 4508 * are valid. 4509 */ 4510 if (ia64_platform_is("sn2")) { 4511 nv->frame_payload_size = 2048; 4512 if (IS_QLA23XX(ha)) 4513 nv->special_options[1] = BIT_7; 4514 } 4515 #endif 4516 4517 /* Reset Initialization control block */ 4518 memset(icb, 0, ha->init_cb_size); 4519 4520 /* 4521 * Setup driver NVRAM options. 4522 */ 4523 nv->firmware_options[0] |= (BIT_6 | BIT_1); 4524 nv->firmware_options[0] &= ~(BIT_5 | BIT_4); 4525 nv->firmware_options[1] |= (BIT_5 | BIT_0); 4526 nv->firmware_options[1] &= ~BIT_4; 4527 4528 if (IS_QLA23XX(ha)) { 4529 nv->firmware_options[0] |= BIT_2; 4530 nv->firmware_options[0] &= ~BIT_3; 4531 nv->special_options[0] &= ~BIT_6; 4532 nv->add_firmware_options[1] |= BIT_5 | BIT_4; 4533 4534 if (IS_QLA2300(ha)) { 4535 if (ha->fb_rev == FPM_2310) { 4536 strcpy(ha->model_number, "QLA2310"); 4537 } else { 4538 strcpy(ha->model_number, "QLA2300"); 4539 } 4540 } else { 4541 qla2x00_set_model_info(vha, nv->model_number, 4542 sizeof(nv->model_number), "QLA23xx"); 4543 } 4544 } else if (IS_QLA2200(ha)) { 4545 nv->firmware_options[0] |= BIT_2; 4546 /* 4547 * 'Point-to-point preferred, else loop' is not a safe 4548 * connection mode setting. 4549 */ 4550 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) == 4551 (BIT_5 | BIT_4)) { 4552 /* Force 'loop preferred, else point-to-point'. 
*/ 4553 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4); 4554 nv->add_firmware_options[0] |= BIT_5; 4555 } 4556 strcpy(ha->model_number, "QLA22xx"); 4557 } else /*if (IS_QLA2100(ha))*/ { 4558 strcpy(ha->model_number, "QLA2100"); 4559 } 4560 4561 /* 4562 * Copy over NVRAM RISC parameter block to initialization control block. 4563 */ 4564 dptr1 = (uint8_t *)icb; 4565 dptr2 = (uint8_t *)&nv->parameter_block_version; 4566 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version; 4567 while (cnt--) 4568 *dptr1++ = *dptr2++; 4569 4570 /* Copy 2nd half. */ 4571 dptr1 = (uint8_t *)icb->add_firmware_options; 4572 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; 4573 while (cnt--) 4574 *dptr1++ = *dptr2++; 4575 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); 4576 /* Use alternate WWN? */ 4577 if (nv->host_p[1] & BIT_7) { 4578 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4579 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4580 } 4581 4582 /* Prepare nodename */ 4583 if ((icb->firmware_options[1] & BIT_6) == 0) { 4584 /* 4585 * Firmware will apply the following mask if the nodename was 4586 * not provided. 4587 */ 4588 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4589 icb->node_name[0] &= 0xF0; 4590 } 4591 4592 /* 4593 * Set host adapter parameters. 4594 */ 4595 4596 /* 4597 * BIT_7 in the host-parameters section allows for modification to 4598 * internal driver logging. 4599 */ 4600 if (nv->host_p[0] & BIT_7) 4601 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 4602 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 4603 /* Always load RISC code on non ISP2[12]00 chips. */ 4604 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 4605 ha->flags.disable_risc_code_load = 0; 4606 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); 4607 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); 4608 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 
1 : 0); 4609 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; 4610 ha->flags.disable_serdes = 0; 4611 4612 ha->operating_mode = 4613 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; 4614 4615 memcpy(ha->fw_seriallink_options, nv->seriallink_options, 4616 sizeof(ha->fw_seriallink_options)); 4617 4618 /* save HBA serial number */ 4619 ha->serial0 = icb->port_name[5]; 4620 ha->serial1 = icb->port_name[6]; 4621 ha->serial2 = icb->port_name[7]; 4622 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4623 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4624 4625 icb->execution_throttle = cpu_to_le16(0xFFFF); 4626 4627 ha->retry_count = nv->retry_count; 4628 4629 /* Set minimum login_timeout to 4 seconds. */ 4630 if (nv->login_timeout != ql2xlogintimeout) 4631 nv->login_timeout = ql2xlogintimeout; 4632 if (nv->login_timeout < 4) 4633 nv->login_timeout = 4; 4634 ha->login_timeout = nv->login_timeout; 4635 4636 /* Set minimum RATOV to 100 tenths of a second. */ 4637 ha->r_a_tov = 100; 4638 4639 ha->loop_reset_delay = nv->reset_delay; 4640 4641 /* Link Down Timeout = 0: 4642 * 4643 * When Port Down timer expires we will start returning 4644 * I/O's to OS with "DID_NO_CONNECT". 4645 * 4646 * Link Down Timeout != 0: 4647 * 4648 * The driver waits for the link to come up after link down 4649 * before returning I/Os to OS with "DID_NO_CONNECT". 4650 */ 4651 if (nv->link_down_timeout == 0) { 4652 ha->loop_down_abort_time = 4653 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4654 } else { 4655 ha->link_down_timeout = nv->link_down_timeout; 4656 ha->loop_down_abort_time = 4657 (LOOP_DOWN_TIME - ha->link_down_timeout); 4658 } 4659 4660 /* 4661 * Need enough time to try and get the port back. 
4662 */ 4663 ha->port_down_retry_count = nv->port_down_retry_count; 4664 if (qlport_down_retry) 4665 ha->port_down_retry_count = qlport_down_retry; 4666 /* Set login_retry_count */ 4667 ha->login_retry_count = nv->retry_count; 4668 if (ha->port_down_retry_count == nv->port_down_retry_count && 4669 ha->port_down_retry_count > 3) 4670 ha->login_retry_count = ha->port_down_retry_count; 4671 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4672 ha->login_retry_count = ha->port_down_retry_count; 4673 if (ql2xloginretrycount) 4674 ha->login_retry_count = ql2xloginretrycount; 4675 4676 icb->lun_enables = cpu_to_le16(0); 4677 icb->command_resource_count = 0; 4678 icb->immediate_notify_resource_count = 0; 4679 icb->timeout = cpu_to_le16(0); 4680 4681 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4682 /* Enable RIO */ 4683 icb->firmware_options[0] &= ~BIT_3; 4684 icb->add_firmware_options[0] &= 4685 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 4686 icb->add_firmware_options[0] |= BIT_2; 4687 icb->response_accumulation_timer = 3; 4688 icb->interrupt_delay_timer = 5; 4689 4690 vha->flags.process_response_queue = 1; 4691 } else { 4692 /* Enable ZIO. */ 4693 if (!vha->flags.init_done) { 4694 ha->zio_mode = icb->add_firmware_options[0] & 4695 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4696 ha->zio_timer = icb->interrupt_delay_timer ? 
		    icb->interrupt_delay_timer: 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * Detach the fc_rport associated with @data (an fc_port_t) from the FC
 * transport.  Under the host lock the pending "deferred" rport (drport)
 * takes precedence over the live rport; drport is cleared so the delete
 * happens at most once.
 */
static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport: fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	if (rport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
		    "%s %8phN. rport %p roles %x\n",
		    __func__, fcport->port_name, rport,
		    rport->roles);

		/* Hand the remote port back to the FC transport layer. */
		fc_remote_port_delete(rport);
	}
}

/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @vha: HA context
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
	fc_port_t *fcport;

	fcport = kzalloc(sizeof(fc_port_t), flags);
	if (!fcport)
		return NULL;

	/* Setup fcport template structure.
*/ 4760 fcport->vha = vha; 4761 fcport->port_type = FCT_UNKNOWN; 4762 fcport->loop_id = FC_NO_LOOP_ID; 4763 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 4764 fcport->supported_classes = FC_COS_UNSPECIFIED; 4765 4766 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 4767 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 4768 flags); 4769 fcport->disc_state = DSC_DELETED; 4770 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 4771 fcport->deleted = QLA_SESS_DELETED; 4772 fcport->login_retry = vha->hw->login_retry_count; 4773 fcport->logout_on_delete = 1; 4774 4775 if (!fcport->ct_desc.ct_sns) { 4776 ql_log(ql_log_warn, vha, 0xd049, 4777 "Failed to allocate ct_sns request.\n"); 4778 kfree(fcport); 4779 fcport = NULL; 4780 } 4781 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 4782 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); 4783 INIT_LIST_HEAD(&fcport->gnl_entry); 4784 INIT_LIST_HEAD(&fcport->list); 4785 4786 return fcport; 4787 } 4788 4789 void 4790 qla2x00_free_fcport(fc_port_t *fcport) 4791 { 4792 if (fcport->ct_desc.ct_sns) { 4793 dma_free_coherent(&fcport->vha->hw->pdev->dev, 4794 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 4795 fcport->ct_desc.ct_sns_dma); 4796 4797 fcport->ct_desc.ct_sns = NULL; 4798 } 4799 kfree(fcport); 4800 } 4801 4802 /* 4803 * qla2x00_configure_loop 4804 * Updates Fibre Channel Device Database with what is actually on loop. 4805 * 4806 * Input: 4807 * ha = adapter block pointer. 4808 * 4809 * Returns: 4810 * 0 = success. 4811 * 1 = error. 4812 * 2 = database was full and device was not configured. 
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/*
	 * Snapshot the DPC flags; save_flags is used at the end to restore
	 * update requests if a resync event arrives while we work.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do based on the current topology. */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		if (qla_tgt_mode_enabled(vha)) {
			/* allow the other side to start the login */
			clear_bit(LOCAL_LOOP_UPDATE, &flags);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Offline/aborting: do both local-loop and fabric scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;

	uint16_t index;
	uint16_t entries;
	char *id_iter;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
		/* borrowing init_cb as a scratch buffer for the template */
		u32 *bp, i, sz;

		memset(ha->init_cb, 0, ha->init_cb_size);
		sz = min_t(int, sizeof(struct els_plogi_payload),
		    ha->init_cb_size);
		rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
		    (void *)ha->init_cb, sz);
		if (rval == QLA_SUCCESS) {
			/* Byte-swap the template to big-endian wire order. */
			bp = (uint32_t *)ha->init_cb;
			for (i = 0; i < sz/4 ; i++, bp++)
				*bp = cpu_to_be32(*bp);

			memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
			    sizeof(ha->plogi_els_payld.data));
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		} else {
			ql_dbg(ql_dbg_init, vha, 0x00d1,
			    "PLOGI ELS param read fail.\n");
		}
		return QLA_SUCCESS;
	}

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices.
 */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    (uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info));

	if (entries == 0) {
		/* Empty scan result: retry a bounded number of times. */
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark every known port as unseen; rediscovery flips them back. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		/* ISP2100/2200 report an 8-bit loop ID; later chips 16-bit. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter.
 */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list (by WWPN). */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known port: refresh its identity from the scan. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport.
 */
			fcport = new_fcport;

			/* Drop the lock across the blocking allocation. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Act on the scan results: log in found ports, drop absent ones. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}

/*
 * Adjust the firmware's iIDMA (port speed) setting for @fcport to match
 * the negotiated fabric speed, when the HBA supports it.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	/* Skip if the speed is unknown, beyond the link, or GPSC is off. */
	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
	}
}

/* Synchronous worker: apply iIDMA and FCP priority settings to a port. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}

/* Queue qla_do_iidma_work() via the driver's work-event mechanism. */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Link the transport rport back to our fcport under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/* Roles are announced in a second step via rolechg. */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");

	fc_remote_port_rolechg(rport, rport_ids.roles);
}

/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 *	Kernel context.
5278 */ 5279 void 5280 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 5281 { 5282 if (IS_SW_RESV_ADDR(fcport->d_id)) 5283 return; 5284 5285 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", 5286 __func__, fcport->port_name); 5287 5288 fcport->disc_state = DSC_UPD_FCPORT; 5289 fcport->login_retry = vha->hw->login_retry_count; 5290 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 5291 fcport->deleted = 0; 5292 fcport->logout_on_delete = 1; 5293 fcport->login_retry = vha->hw->login_retry_count; 5294 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; 5295 5296 switch (vha->hw->current_topology) { 5297 case ISP_CFG_N: 5298 case ISP_CFG_NL: 5299 fcport->keep_nport_handle = 1; 5300 break; 5301 default: 5302 break; 5303 } 5304 5305 qla2x00_iidma_fcport(vha, fcport); 5306 5307 if (fcport->fc4f_nvme) { 5308 qla_nvme_register_remote(vha, fcport); 5309 fcport->disc_state = DSC_LOGIN_COMPLETE; 5310 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 5311 return; 5312 } 5313 5314 qla24xx_update_fcport_fcp_prio(vha, fcport); 5315 5316 switch (vha->host->active_mode) { 5317 case MODE_INITIATOR: 5318 qla2x00_reg_remote_port(vha, fcport); 5319 break; 5320 case MODE_TARGET: 5321 if (!vha->vha_tgt.qla_tgt->tgt_stop && 5322 !vha->vha_tgt.qla_tgt->tgt_stopped) 5323 qlt_fc_port_added(vha, fcport); 5324 break; 5325 case MODE_DUAL: 5326 qla2x00_reg_remote_port(vha, fcport); 5327 if (!vha->vha_tgt.qla_tgt->tgt_stop && 5328 !vha->vha_tgt.qla_tgt->tgt_stopped) 5329 qlt_fc_port_added(vha, fcport); 5330 break; 5331 default: 5332 break; 5333 } 5334 5335 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 5336 5337 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { 5338 if (fcport->id_changed) { 5339 fcport->id_changed = 0; 5340 ql_dbg(ql_dbg_disc, vha, 0x20d7, 5341 "%s %d %8phC post gfpnid fcp_cnt %d\n", 5342 __func__, __LINE__, fcport->port_name, 5343 vha->fcport_count); 5344 qla24xx_post_gfpnid_work(vha, fcport); 5345 } else { 5346 ql_dbg(ql_dbg_disc, vha, 0x20d7, 
5347 "%s %d %8phC post gpsc fcp_cnt %d\n", 5348 __func__, __LINE__, fcport->port_name, 5349 vha->fcport_count); 5350 qla24xx_post_gpsc_work(vha, fcport); 5351 } 5352 } 5353 5354 fcport->disc_state = DSC_LOGIN_COMPLETE; 5355 } 5356 5357 void qla_register_fcport_fn(struct work_struct *work) 5358 { 5359 fc_port_t *fcport = container_of(work, struct fc_port, reg_work); 5360 u32 rscn_gen = fcport->rscn_gen; 5361 u16 data[2]; 5362 5363 if (IS_SW_RESV_ADDR(fcport->d_id)) 5364 return; 5365 5366 qla2x00_update_fcport(fcport->vha, fcport); 5367 5368 if (rscn_gen != fcport->rscn_gen) { 5369 /* RSCN(s) came in while registration */ 5370 switch (fcport->next_disc_state) { 5371 case DSC_DELETE_PEND: 5372 qlt_schedule_sess_for_deletion(fcport); 5373 break; 5374 case DSC_ADISC: 5375 data[0] = data[1] = 0; 5376 qla2x00_post_async_adisc_work(fcport->vha, fcport, 5377 data); 5378 break; 5379 default: 5380 break; 5381 } 5382 } 5383 } 5384 5385 /* 5386 * qla2x00_configure_fabric 5387 * Setup SNS devices with loop ID's. 5388 * 5389 * Input: 5390 * ha = adapter block pointer. 5391 * 5392 * Returns: 5393 * 0 = success. 
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch attached: not an error, just nothing to do. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;


	/* Target/dual mode needs the switch to forward RSCNs to us. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}


	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS.
 */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}
		/* Register our FC-4 types/features and names with the SNS. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen.
 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			/* Synchronous scan: mark all ports unseen first. */
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
*/ 5556 if (!ha->swl) 5557 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), 5558 GFP_KERNEL); 5559 swl = ha->swl; 5560 if (!swl) { 5561 /*EMPTY*/ 5562 ql_dbg(ql_dbg_disc, vha, 0x209c, 5563 "GID_PT allocations failed, fallback on GA_NXT.\n"); 5564 } else { 5565 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); 5566 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 5567 swl = NULL; 5568 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5569 return rval; 5570 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 5571 swl = NULL; 5572 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5573 return rval; 5574 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 5575 swl = NULL; 5576 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5577 return rval; 5578 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { 5579 swl = NULL; 5580 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5581 return rval; 5582 } 5583 5584 /* If other queries succeeded probe for FC-4 type */ 5585 if (swl) { 5586 qla2x00_gff_id(vha, swl); 5587 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5588 return rval; 5589 } 5590 } 5591 swl_idx = 0; 5592 5593 /* Allocate temporary fcport for any new fcports discovered. */ 5594 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5595 if (new_fcport == NULL) { 5596 ql_log(ql_log_warn, vha, 0x209d, 5597 "Failed to allocate memory for fcport.\n"); 5598 return (QLA_MEMORY_ALLOC_FAILED); 5599 } 5600 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 5601 /* Set start port ID scan at adapter ID. */ 5602 first_dev = 1; 5603 last_dev = 0; 5604 5605 /* Starting free loop ID. 
*/ 5606 loop_id = ha->min_external_loopid; 5607 for (; loop_id <= ha->max_loop_id; loop_id++) { 5608 if (qla2x00_is_reserved_id(vha, loop_id)) 5609 continue; 5610 5611 if (ha->current_topology == ISP_CFG_FL && 5612 (atomic_read(&vha->loop_down_timer) || 5613 LOOP_TRANSITION(vha))) { 5614 atomic_set(&vha->loop_down_timer, 0); 5615 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5616 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5617 break; 5618 } 5619 5620 if (swl != NULL) { 5621 if (last_dev) { 5622 wrap.b24 = new_fcport->d_id.b24; 5623 } else { 5624 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; 5625 memcpy(new_fcport->node_name, 5626 swl[swl_idx].node_name, WWN_SIZE); 5627 memcpy(new_fcport->port_name, 5628 swl[swl_idx].port_name, WWN_SIZE); 5629 memcpy(new_fcport->fabric_port_name, 5630 swl[swl_idx].fabric_port_name, WWN_SIZE); 5631 new_fcport->fp_speed = swl[swl_idx].fp_speed; 5632 new_fcport->fc4_type = swl[swl_idx].fc4_type; 5633 5634 new_fcport->nvme_flag = 0; 5635 new_fcport->fc4f_nvme = 0; 5636 if (vha->flags.nvme_enabled && 5637 swl[swl_idx].fc4f_nvme) { 5638 new_fcport->fc4f_nvme = 5639 swl[swl_idx].fc4f_nvme; 5640 ql_log(ql_log_info, vha, 0x2131, 5641 "FOUND: NVME port %8phC as FC Type 28h\n", 5642 new_fcport->port_name); 5643 } 5644 5645 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 5646 last_dev = 1; 5647 } 5648 swl_idx++; 5649 } 5650 } else { 5651 /* Send GA_NXT to the switch */ 5652 rval = qla2x00_ga_nxt(vha, new_fcport); 5653 if (rval != QLA_SUCCESS) { 5654 ql_log(ql_log_warn, vha, 0x209e, 5655 "SNS scan failed -- assuming " 5656 "zero-entry result.\n"); 5657 rval = QLA_SUCCESS; 5658 break; 5659 } 5660 } 5661 5662 /* If wrap on switch device list, exit. 
*/ 5663 if (first_dev) { 5664 wrap.b24 = new_fcport->d_id.b24; 5665 first_dev = 0; 5666 } else if (new_fcport->d_id.b24 == wrap.b24) { 5667 ql_dbg(ql_dbg_disc, vha, 0x209f, 5668 "Device wrap (%02x%02x%02x).\n", 5669 new_fcport->d_id.b.domain, 5670 new_fcport->d_id.b.area, 5671 new_fcport->d_id.b.al_pa); 5672 break; 5673 } 5674 5675 /* Bypass if same physical adapter. */ 5676 if (new_fcport->d_id.b24 == base_vha->d_id.b24) 5677 continue; 5678 5679 /* Bypass virtual ports of the same host. */ 5680 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24)) 5681 continue; 5682 5683 /* Bypass if same domain and area of adapter. */ 5684 if (((new_fcport->d_id.b24 & 0xffff00) == 5685 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == 5686 ISP_CFG_FL) 5687 continue; 5688 5689 /* Bypass reserved domain fields. */ 5690 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 5691 continue; 5692 5693 /* Bypass ports whose FCP-4 type is not FCP_SCSI */ 5694 if (ql2xgffidenable && 5695 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && 5696 new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) 5697 continue; 5698 5699 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5700 5701 /* Locate matching device in database. */ 5702 found = 0; 5703 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5704 if (memcmp(new_fcport->port_name, fcport->port_name, 5705 WWN_SIZE)) 5706 continue; 5707 5708 fcport->scan_state = QLA_FCPORT_FOUND; 5709 5710 found++; 5711 5712 /* Update port state. */ 5713 memcpy(fcport->fabric_port_name, 5714 new_fcport->fabric_port_name, WWN_SIZE); 5715 fcport->fp_speed = new_fcport->fp_speed; 5716 5717 /* 5718 * If address the same and state FCS_ONLINE 5719 * (or in target mode), nothing changed. 5720 */ 5721 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 5722 (atomic_read(&fcport->state) == FCS_ONLINE || 5723 (vha->host->active_mode == MODE_TARGET))) { 5724 break; 5725 } 5726 5727 /* 5728 * If device was not a fabric device before. 
5729 */ 5730 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 5731 fcport->d_id.b24 = new_fcport->d_id.b24; 5732 qla2x00_clear_loop_id(fcport); 5733 fcport->flags |= (FCF_FABRIC_DEVICE | 5734 FCF_LOGIN_NEEDED); 5735 break; 5736 } 5737 5738 /* 5739 * Port ID changed or device was marked to be updated; 5740 * Log it out if still logged in and mark it for 5741 * relogin later. 5742 */ 5743 if (qla_tgt_mode_enabled(base_vha)) { 5744 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, 5745 "port changed FC ID, %8phC" 5746 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", 5747 fcport->port_name, 5748 fcport->d_id.b.domain, 5749 fcport->d_id.b.area, 5750 fcport->d_id.b.al_pa, 5751 fcport->loop_id, 5752 new_fcport->d_id.b.domain, 5753 new_fcport->d_id.b.area, 5754 new_fcport->d_id.b.al_pa); 5755 fcport->d_id.b24 = new_fcport->d_id.b24; 5756 break; 5757 } 5758 5759 fcport->d_id.b24 = new_fcport->d_id.b24; 5760 fcport->flags |= FCF_LOGIN_NEEDED; 5761 break; 5762 } 5763 5764 if (fcport->fc4f_nvme) { 5765 if (fcport->disc_state == DSC_DELETE_PEND) { 5766 fcport->disc_state = DSC_GNL; 5767 vha->fcport_count--; 5768 fcport->login_succ = 0; 5769 } 5770 } 5771 5772 if (found) { 5773 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5774 continue; 5775 } 5776 /* If device was not in our fcports list, then add it. */ 5777 new_fcport->scan_state = QLA_FCPORT_FOUND; 5778 list_add_tail(&new_fcport->list, &vha->vp_fcports); 5779 5780 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5781 5782 5783 /* Allocate a new replacement fcport. 
*/ 5784 nxt_d_id.b24 = new_fcport->d_id.b24; 5785 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5786 if (new_fcport == NULL) { 5787 ql_log(ql_log_warn, vha, 0xd032, 5788 "Memory allocation failed for fcport.\n"); 5789 return (QLA_MEMORY_ALLOC_FAILED); 5790 } 5791 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 5792 new_fcport->d_id.b24 = nxt_d_id.b24; 5793 } 5794 5795 qla2x00_free_fcport(new_fcport); 5796 5797 /* 5798 * Logout all previous fabric dev marked lost, except FCP2 devices. 5799 */ 5800 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5801 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5802 break; 5803 5804 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 5805 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 5806 continue; 5807 5808 if (fcport->scan_state == QLA_FCPORT_SCAN) { 5809 if ((qla_dual_mode_enabled(vha) || 5810 qla_ini_mode_enabled(vha)) && 5811 atomic_read(&fcport->state) == FCS_ONLINE) { 5812 qla2x00_mark_device_lost(vha, fcport, 5813 ql2xplogiabsentdevice, 0); 5814 if (fcport->loop_id != FC_NO_LOOP_ID && 5815 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 5816 fcport->port_type != FCT_INITIATOR && 5817 fcport->port_type != FCT_BROADCAST) { 5818 ql_dbg(ql_dbg_disc, vha, 0x20f0, 5819 "%s %d %8phC post del sess\n", 5820 __func__, __LINE__, 5821 fcport->port_name); 5822 qlt_schedule_sess_for_deletion(fcport); 5823 continue; 5824 } 5825 } 5826 } 5827 5828 if (fcport->scan_state == QLA_FCPORT_FOUND) 5829 qla24xx_fcport_handle_login(vha, fcport); 5830 } 5831 return (rval); 5832 } 5833 5834 /* 5835 * qla2x00_find_new_loop_id 5836 * Scan through our port list and find a new usable loop ID. 5837 * 5838 * Input: 5839 * ha: adapter state pointer. 5840 * dev: port structure pointer. 5841 * 5842 * Returns: 5843 * qla2x00 local function return status code. 5844 * 5845 * Context: 5846 * Kernel context. 
 */
int
qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* The loop-ID bitmap is shared by all vports; serialize with
	 * vport_slock so two ports cannot claim the same ID. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
	    LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		/* Map exhausted, or first free slot is a reserved ID. */
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else
		set_bit(dev->loop_id, ha->loop_id_map);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return (rval);
}


/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
	int loop_id = FC_NO_LOOP_ID;
	/* Start the search just below the IDs used by lower vport indices. */
	int lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* The physical port (vp_idx 0) always takes the fixed handle. */
	if (vha->vp_idx == 0) {
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;
	}

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);
			loop_id = lid;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* FC_NO_LOOP_ID if every candidate ID was already taken. */
	return loop_id;
}

/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Keep retrying the login until the firmware returns a terminal
	 * mailbox status; each branch below either retries with a new
	 * loop ID or breaks out with the final rval. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			/* Transport-level failure; mailbox status invalid. */
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bits describe the remote port's role. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] advertises supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 * qla2x00_loop_resync
 *      Resync with fibre channel devices.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	req = vha->req;
	rsp = req->rsp;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, req, rsp, 0, 0,
						MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop.
 */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Loop again if configure_loop re-raised
				 * LOOP_RESYNC_NEEDED, up to 256 passes. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return    : Success or Failure
 */

int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* LOOP_RESYNC_ACTIVE doubles as a "resync in progress" latch;
	 * if another context holds it, do nothing and report success. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/* Drop stale rport references on every vport of this adapter. */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references.
 */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Hold a vref so the vport cannot be torn down while we
		 * temporarily drop vport_slock around qla2x00_rport_del()
		 * (which may sleep). */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* Read presence and partition-info registers; access path differs
	 * for ISP8044 (direct CRB reads) vs other 83xx parts. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* Each 4-bit nibble of dev_part_info1 describes functions 0-7;
	 * find another FCoE-class function besides ourselves. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* Not found among 0-7; dev_part_info2 covers functions 8-15. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}

/* Set this function's bit in the IDC driver-ack register. */
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	/* Read-modify-write; caller is expected to hold the idc lock. */
	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Clear this function's bit in the IDC driver-ack register. */
static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Map an IDC device-state value to a human-readable name for logging. */
static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA8XXX_DEV_COLD:
		return "COLD/RE-INIT";
	case QLA8XXX_DEV_INITIALIZING:
		return "INITIALIZING";
	case QLA8XXX_DEV_READY:
		return "READY";
	case QLA8XXX_DEV_NEED_RESET:
		return "NEED RESET";
	case QLA8XXX_DEV_NEED_QUIESCENT:
		return "NEED QUIESCENT";
	case QLA8XXX_DEV_FAILED:
		return "FAILED";
	case QLA8XXX_DEV_QUIESCENT:
		return "QUIESCENT";
	default:
		return "Unknown";
	}
}

/* Assumes
idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	/* Audit register layout: bits 0-6 = port number, bit 7 = audit
	 * type, bits 8+ = timestamp or duration in seconds. */
	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}

/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		/* Not the reset owner: wait for the owner to move the
		 * device state out of READY. */
		const char *state = qla83xx_dev_state_to_string(dev_state);
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here?
*/ 6382 /* Wait for IDC state change READY -> NEED_RESET */ 6383 while (dev_state == QLA8XXX_DEV_READY) { 6384 qla83xx_idc_unlock(vha, 0); 6385 msleep(200); 6386 qla83xx_idc_lock(vha, 0); 6387 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6388 } 6389 } 6390 6391 /* Send IDC ack by writing to drv-ack register */ 6392 __qla83xx_set_drv_ack(vha); 6393 6394 return QLA_SUCCESS; 6395 } 6396 6397 int 6398 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) 6399 { 6400 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6401 } 6402 6403 int 6404 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) 6405 { 6406 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 6407 } 6408 6409 static int 6410 qla83xx_check_driver_presence(scsi_qla_host_t *vha) 6411 { 6412 uint32_t drv_presence = 0; 6413 struct qla_hw_data *ha = vha->hw; 6414 6415 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6416 if (drv_presence & (1 << ha->portnum)) 6417 return QLA_SUCCESS; 6418 else 6419 return QLA_TEST_FAILED; 6420 } 6421 6422 int 6423 qla83xx_nic_core_reset(scsi_qla_host_t *vha) 6424 { 6425 int rval = QLA_SUCCESS; 6426 struct qla_hw_data *ha = vha->hw; 6427 6428 ql_dbg(ql_dbg_p3p, vha, 0xb058, 6429 "Entered %s().\n", __func__); 6430 6431 if (vha->device_flags & DFLG_DEV_FAILED) { 6432 ql_log(ql_log_warn, vha, 0xb059, 6433 "Device in unrecoverable FAILED state.\n"); 6434 return QLA_FUNCTION_FAILED; 6435 } 6436 6437 qla83xx_idc_lock(vha, 0); 6438 6439 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { 6440 ql_log(ql_log_warn, vha, 0xb05a, 6441 "Function=0x%x has been removed from IDC participation.\n", 6442 ha->portnum); 6443 rval = QLA_FUNCTION_FAILED; 6444 goto exit; 6445 } 6446 6447 qla83xx_reset_ownership(vha); 6448 6449 rval = qla83xx_initiating_reset(vha); 6450 6451 /* 6452 * Perform reset if we are the reset-owner, 6453 * else wait till IDC state changes to READY/FAILED. 
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}

/* Capture an MCTP firmware dump into a DMA-coherent buffer. */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Dump buffer is allocated once and reused for later captures. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR 0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only function 0 restarts the NIC firmware, and only when no
	 * other NIC-core reset handler is already active. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed.
 */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}

/*
 * qla2x00_quiesce_io
 * Description: This function will block the new I/Os
 *              Its not aborting any I/Os as context
 *              is not destroyed during quiescence
 * Arguments: scsi_qla_host_t
 * return : void
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		/* Mark the loop down on the base host and every vport so
		 * no new I/O is issued while quiesced. */
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
					LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
}

/* Tear down adapter state ahead of an ISP reset: stop the chip, purge
 * mailbox waiters, mark devices lost and abort outstanding commands. */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump the chip-reset generation and propagate it to every queue
	 * pair so stale completions can be recognized and dropped. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Poll up to ~1s (50 x 20ms) for all pending mailbox commands,
	 * at any stage, to drain out. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* vref pins the vport while the lock is dropped
			 * around the potentially-sleeping mark-lost call. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs.
 */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		/* Pin the vport across the unlocked fcport walk. */
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}

/*
 * qla2x00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t        status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* Nothing more to do if the PCI channel is permanently
		 * gone; report success so recovery stops retrying. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		/* Skip the restart when the configured initiator/target
		 * mode means this host has no role to recover. */
		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm the FCE trace buffer if one was in use. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm the EFT trace buffer if one was in use. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: arm the retry counter. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/* vref pins the vport while vport_slock is
				 * dropped around the recursive abort. */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}

/*
 * qla2x00_restart_isp
 *	restarts the ISP after a reset
 *
 * Input:
 *	ha = adapter block pointer.
6851 * 6852 * Returns: 6853 * 0 = success 6854 */ 6855 static int 6856 qla2x00_restart_isp(scsi_qla_host_t *vha) 6857 { 6858 int status = 0; 6859 struct qla_hw_data *ha = vha->hw; 6860 struct req_que *req = ha->req_q_map[0]; 6861 struct rsp_que *rsp = ha->rsp_q_map[0]; 6862 6863 /* If firmware needs to be loaded */ 6864 if (qla2x00_isp_firmware(vha)) { 6865 vha->flags.online = 0; 6866 status = ha->isp_ops->chip_diag(vha); 6867 if (!status) 6868 status = qla2x00_setup_chip(vha); 6869 } 6870 6871 if (!status && !(status = qla2x00_init_rings(vha))) { 6872 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 6873 ha->flags.chip_reset_done = 1; 6874 6875 /* Initialize the queues in use */ 6876 qla25xx_init_queues(ha); 6877 6878 status = qla2x00_fw_ready(vha); 6879 if (!status) { 6880 /* Issue a marker after FW becomes ready. */ 6881 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 6882 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 6883 } 6884 6885 /* if no cable then assume it's good */ 6886 if ((vha->device_flags & DFLG_NO_CABLE)) 6887 status = 0; 6888 } 6889 return (status); 6890 } 6891 6892 static int 6893 qla25xx_init_queues(struct qla_hw_data *ha) 6894 { 6895 struct rsp_que *rsp = NULL; 6896 struct req_que *req = NULL; 6897 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 6898 int ret = -1; 6899 int i; 6900 6901 for (i = 1; i < ha->max_rsp_queues; i++) { 6902 rsp = ha->rsp_q_map[i]; 6903 if (rsp && test_bit(i, ha->rsp_qid_map)) { 6904 rsp->options &= ~BIT_0; 6905 ret = qla25xx_init_rsp_que(base_vha, rsp); 6906 if (ret != QLA_SUCCESS) 6907 ql_dbg(ql_dbg_init, base_vha, 0x00ff, 6908 "%s Rsp que: %d init failed.\n", 6909 __func__, rsp->id); 6910 else 6911 ql_dbg(ql_dbg_init, base_vha, 0x0100, 6912 "%s Rsp que: %d inited.\n", 6913 __func__, rsp->id); 6914 } 6915 } 6916 for (i = 1; i < ha->max_req_queues; i++) { 6917 req = ha->req_q_map[i]; 6918 if (req && test_bit(i, ha->req_qid_map)) { 6919 /* Clear outstanding commands array. 
*/ 6920 req->options &= ~BIT_0; 6921 ret = qla25xx_init_req_que(base_vha, req); 6922 if (ret != QLA_SUCCESS) 6923 ql_dbg(ql_dbg_init, base_vha, 0x0101, 6924 "%s Req que: %d init failed.\n", 6925 __func__, req->id); 6926 else 6927 ql_dbg(ql_dbg_init, base_vha, 0x0102, 6928 "%s Req que: %d inited.\n", 6929 __func__, req->id); 6930 } 6931 } 6932 return ret; 6933 } 6934 6935 /* 6936 * qla2x00_reset_adapter 6937 * Reset adapter. 6938 * 6939 * Input: 6940 * ha = adapter block pointer. 6941 */ 6942 void 6943 qla2x00_reset_adapter(scsi_qla_host_t *vha) 6944 { 6945 unsigned long flags = 0; 6946 struct qla_hw_data *ha = vha->hw; 6947 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 6948 6949 vha->flags.online = 0; 6950 ha->isp_ops->disable_intrs(ha); 6951 6952 spin_lock_irqsave(&ha->hardware_lock, flags); 6953 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 6954 RD_REG_WORD(®->hccr); /* PCI Posting. */ 6955 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 6956 RD_REG_WORD(®->hccr); /* PCI Posting. */ 6957 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6958 } 6959 6960 void 6961 qla24xx_reset_adapter(scsi_qla_host_t *vha) 6962 { 6963 unsigned long flags = 0; 6964 struct qla_hw_data *ha = vha->hw; 6965 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 6966 6967 if (IS_P3P_TYPE(ha)) 6968 return; 6969 6970 vha->flags.online = 0; 6971 ha->isp_ops->disable_intrs(ha); 6972 6973 spin_lock_irqsave(&ha->hardware_lock, flags); 6974 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 6975 RD_REG_DWORD(®->hccr); 6976 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 6977 RD_REG_DWORD(®->hccr); 6978 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6979 6980 if (IS_NOPOLLING_TYPE(ha)) 6981 ha->isp_ops->enable_intrs(ha); 6982 } 6983 6984 /* On sparc systems, obtain port and node WWN from firmware 6985 * properties. 
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
    struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
        struct qla_hw_data *ha = vha->hw;
        struct pci_dev *pdev = ha->pdev;
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const u8 *val;
        int len;

        /* OpenFirmware properties are raw WWN byte arrays. */
        val = of_get_property(dp, "port-wwn", &len);
        if (val && len >= WWN_SIZE)
                memcpy(nv->port_name, val, WWN_SIZE);

        val = of_get_property(dp, "node-wwn", &len);
        if (val && len >= WWN_SIZE)
                memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * qla24xx_nvram_config
 *      Read and validate the 24xx NVRAM image, then derive the
 *      initialization control block (ICB) and driver operating
 *      parameters from it.  When the NVRAM is inconsistent, a set of
 *      functional (but invalid-WWPN) defaults is installed instead.
 *
 * Input:
 *      vha = adapter block pointer.
 *
 * Returns:
 *      QLA_SUCCESS, or 1 when defaults had to be substituted.
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
        int rval;
        struct init_cb_24xx *icb;
        struct nvram_24xx *nv;
        uint32_t *dptr;
        uint8_t *dptr1, *dptr2;
        uint32_t chksum;
        uint16_t cnt;
        struct qla_hw_data *ha = vha->hw;

        rval = QLA_SUCCESS;
        icb = (struct init_cb_24xx *)ha->init_cb;
        nv = ha->nvram;

        /* Determine NVRAM starting address (per-PCI-function region). */
        if (ha->port_no == 0) {
                ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
                ha->vpd_base = FA_NVRAM_VPD0_ADDR;
        } else {
                ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
                ha->vpd_base = FA_NVRAM_VPD1_ADDR;
        }

        ha->nvram_size = sizeof(struct nvram_24xx);
        ha->vpd_size = FA_NVRAM_VPD_SIZE;

        /* Get VPD data into cache */
        ha->vpd = ha->nvram + VPD_OFFSET;
        ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
            ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

        /* Get NVRAM data into cache and calculate checksum. */
        dptr = (uint32_t *)nv;
        ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
            ha->nvram_size);
        /* A valid image sums (mod 2^32) to zero over all dwords. */
        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
                chksum += le32_to_cpu(*dptr);

        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
            "Contents of NVRAM\n");
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
            (uint8_t *)nv, ha->nvram_size);

        /* Bad NVRAM data, set defaults parameters. */
        if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
            || nv->id[3] != ' ' ||
            nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x006b,
                    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
                    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x006c,
                    "Falling back to functioning (yet invalid -- WWPN) "
                    "defaults.\n");

                /*
                 * Set default initialization control block.
                 */
                memset(nv, 0, ha->nvram_size);
                nv->nvram_version = cpu_to_le16(ICB_VERSION);
                nv->version = cpu_to_le16(ICB_VERSION);
                nv->frame_payload_size = 2048;
                nv->execution_throttle = cpu_to_le16(0xFFFF);
                nv->exchange_count = cpu_to_le16(0);
                nv->hard_address = cpu_to_le16(124);
                /* Placeholder WWPN/WWNN (QLogic OUI 00:e0:8b). */
                nv->port_name[0] = 0x21;
                nv->port_name[1] = 0x00 + ha->port_no + 1;
                nv->port_name[2] = 0x00;
                nv->port_name[3] = 0xe0;
                nv->port_name[4] = 0x8b;
                nv->port_name[5] = 0x1c;
                nv->port_name[6] = 0x55;
                nv->port_name[7] = 0x86;
                nv->node_name[0] = 0x20;
                nv->node_name[1] = 0x00;
                nv->node_name[2] = 0x00;
                nv->node_name[3] = 0xe0;
                nv->node_name[4] = 0x8b;
                nv->node_name[5] = 0x1c;
                nv->node_name[6] = 0x55;
                nv->node_name[7] = 0x86;
                /* On SPARC, OpenFirmware may supply real WWNs. */
                qla24xx_nvram_wwn_from_ofw(vha, nv);
                nv->login_retry_count = cpu_to_le16(8);
                nv->interrupt_delay_timer = cpu_to_le16(0);
                nv->login_timeout = cpu_to_le16(0);

                nv->firmware_options_1 =
                    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
                nv->firmware_options_2 = cpu_to_le32(2 << 4);
                nv->firmware_options_2 |= cpu_to_le32(BIT_12);
                nv->firmware_options_3 = cpu_to_le32(2 << 13);
                nv->host_p = cpu_to_le32(BIT_11|BIT_10);
                nv->efi_parameters = cpu_to_le32(0);
                nv->reset_delay = 5;
                nv->max_luns_per_target = cpu_to_le16(128);
                nv->port_down_retry_count = cpu_to_le16(30);
                nv->link_down_timeout = cpu_to_le16(30);

                /* Non-zero return tells the caller defaults were used. */
                rval = 1;
        }

        if (qla_tgt_mode_enabled(vha)) {
                /* Don't enable full login after initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Don't enable LIP full login for initiator */
                nv->host_p &= cpu_to_le32(~BIT_10);
        }

        qlt_24xx_config_nvram_stage1(vha, nv);

        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);

        /* Copy 1st segment. */
        dptr1 = (uint8_t *)icb;
        dptr2 = (uint8_t *)&nv->version;
        cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
        while (cnt--)
                *dptr1++ = *dptr2++;

        icb->login_retry_count = nv->login_retry_count;
        icb->link_down_on_nos = nv->link_down_on_nos;

        /* Copy 2nd segment. */
        dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
        dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
        cnt = (uint8_t *)&icb->reserved_3 -
            (uint8_t *)&icb->interrupt_delay_timer;
        while (cnt--)
                *dptr1++ = *dptr2++;
        ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
        /*
         * Setup driver NVRAM options.
         */
        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
            "QLA2462");

        qlt_24xx_config_nvram_stage2(vha, icb);

        if (nv->host_p & cpu_to_le32(BIT_15)) {
                /* Use alternate WWN? */
                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
        }

        /* Prepare nodename */
        if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
                /*
                 * Firmware will apply the following mask if the nodename was
                 * not provided.
                 */
                memcpy(icb->node_name, icb->port_name, WWN_SIZE);
                icb->node_name[0] &= 0xF0;
        }

        /* Set host adapter parameters. */
        ha->flags.disable_risc_code_load = 0;
        ha->flags.enable_lip_reset = 0;
        ha->flags.enable_lip_full_login =
            le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
        ha->flags.enable_target_reset =
            le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
        ha->flags.enable_led_scheme = 0;
        ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;

        ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
            (BIT_6 | BIT_5 | BIT_4)) >> 4;

        memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
            sizeof(ha->fw_seriallink_options24));

        /* save HBA serial number */
        ha->serial0 = icb->port_name[5];
        ha->serial1 = icb->port_name[6];
        ha->serial2 = icb->port_name[7];
        memcpy(vha->node_name, icb->node_name, WWN_SIZE);
        memcpy(vha->port_name, icb->port_name, WWN_SIZE);

        icb->execution_throttle = cpu_to_le16(0xFFFF);

        ha->retry_count = le16_to_cpu(nv->login_retry_count);

        /* Set minimum login_timeout to 4 seconds. */
        if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
                nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
        if (le16_to_cpu(nv->login_timeout) < 4)
                nv->login_timeout = cpu_to_le16(4);
        ha->login_timeout = le16_to_cpu(nv->login_timeout);

        /* Set minimum RATOV to 100 tenths of a second.
*/ 7198 ha->r_a_tov = 100; 7199 7200 ha->loop_reset_delay = nv->reset_delay; 7201 7202 /* Link Down Timeout = 0: 7203 * 7204 * When Port Down timer expires we will start returning 7205 * I/O's to OS with "DID_NO_CONNECT". 7206 * 7207 * Link Down Timeout != 0: 7208 * 7209 * The driver waits for the link to come up after link down 7210 * before returning I/Os to OS with "DID_NO_CONNECT". 7211 */ 7212 if (le16_to_cpu(nv->link_down_timeout) == 0) { 7213 ha->loop_down_abort_time = 7214 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 7215 } else { 7216 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 7217 ha->loop_down_abort_time = 7218 (LOOP_DOWN_TIME - ha->link_down_timeout); 7219 } 7220 7221 /* Need enough time to try and get the port back. */ 7222 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 7223 if (qlport_down_retry) 7224 ha->port_down_retry_count = qlport_down_retry; 7225 7226 /* Set login_retry_count */ 7227 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 7228 if (ha->port_down_retry_count == 7229 le16_to_cpu(nv->port_down_retry_count) && 7230 ha->port_down_retry_count > 3) 7231 ha->login_retry_count = ha->port_down_retry_count; 7232 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 7233 ha->login_retry_count = ha->port_down_retry_count; 7234 if (ql2xloginretrycount) 7235 ha->login_retry_count = ql2xloginretrycount; 7236 7237 /* N2N: driver will initiate Login instead of FW */ 7238 icb->firmware_options_3 |= BIT_8; 7239 7240 /* Enable ZIO. */ 7241 if (!vha->flags.init_done) { 7242 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 7243 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 7244 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
                    le16_to_cpu(icb->interrupt_delay_timer): 2;
        }
        icb->firmware_options_2 &= cpu_to_le32(
            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
        if (ha->zio_mode != QLA_ZIO_DISABLED) {
                ha->zio_mode = QLA_ZIO_MODE_6;

                ql_log(ql_log_info, vha, 0x006f,
                    "ZIO mode %d enabled; timer delay (%d us).\n",
                    ha->zio_mode, ha->zio_timer * 100);

                icb->firmware_options_2 |= cpu_to_le32(
                    (uint32_t)ha->zio_mode);
                icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
        }

        if (rval) {
                ql_log(ql_log_warn, vha, 0x0070,
                    "NVRAM configuration failed.\n");
        }
        return (rval);
}

/*
 * qla27xx_find_valid_image
 *      Select which flash firmware image (primary/secondary) a 27xx part
 *      should boot from, based on each image-status region's signature,
 *      dword checksum, status mask and generation number.
 *
 * Returns ha->active_image: 0 (default boot-loader/fw),
 * QLA27XX_PRIMARY_IMAGE or QLA27XX_SECONDARY_IMAGE.
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
        struct qla27xx_image_status pri_image_status, sec_image_status;
        uint8_t valid_pri_image, valid_sec_image;
        uint32_t *wptr;
        uint32_t cnt, chksum, size;
        struct qla_hw_data *ha = vha->hw;

        valid_pri_image = valid_sec_image = 1;
        ha->active_image = 0;
        /* size is in dwords, matching qla24xx_read_flash_data(). */
        size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

        if (!ha->flt_region_img_status_pri) {
                valid_pri_image = 0;
                goto check_sec_image;
        }

        qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
            ha->flt_region_img_status_pri, size);

        if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
                ql_dbg(ql_dbg_init, vha, 0x018b,
                    "Primary image signature (0x%x) not valid\n",
                    pri_image_status.signature);
                valid_pri_image = 0;
                goto check_sec_image;
        }

        wptr = (uint32_t *)(&pri_image_status);
        cnt = size;

        /* A valid status region checksums (mod 2^32) to zero. */
        for (chksum = 0; cnt--; wptr++)
                chksum += le32_to_cpu(*wptr);

        if (chksum) {
                ql_dbg(ql_dbg_init, vha, 0x018c,
                    "Checksum validation failed for primary image (0x%x)\n",
                    chksum);
                valid_pri_image = 0;
        }

check_sec_image:
        if (!ha->flt_region_img_status_sec) {
                valid_sec_image = 0;
                goto check_valid_image;
        }

        qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
            ha->flt_region_img_status_sec, size);

        if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
                ql_dbg(ql_dbg_init, vha, 0x018d,
                    "Secondary image signature(0x%x) not valid\n",
                    sec_image_status.signature);
                valid_sec_image = 0;
                goto check_valid_image;
        }

        wptr = (uint32_t *)(&sec_image_status);
        cnt = size;
        for (chksum = 0; cnt--; wptr++)
                chksum += le32_to_cpu(*wptr);
        if (chksum) {
                ql_dbg(ql_dbg_init, vha, 0x018e,
                    "Checksum validation failed for secondary image (0x%x)\n",
                    chksum);
                valid_sec_image = 0;
        }

check_valid_image:
        /* Prefer primary; newer generation number wins the tie-break. */
        if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
                ha->active_image = QLA27XX_PRIMARY_IMAGE;
        if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
                if (!ha->active_image ||
                    pri_image_status.generation_number <
                    sec_image_status.generation_number)
                        ha->active_image = QLA27XX_SECONDARY_IMAGE;
        }

        ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
            ha->active_image == 0 ? "default bootld and fw" :
            ha->active_image == 1 ? "primary" :
            ha->active_image == 2 ? "secondary" : "Invalid");

        return ha->active_image;
}

/*
 * qla24xx_load_risc_flash
 *      Load RISC firmware for 24xx-and-later parts from flash at faddr,
 *      segment by segment, staging each fragment through the request
 *      ring's DMA buffer; on 27xx also (re)load the fwdump template.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED; *srisc_addr receives the
 * firmware start address (first segment's load address).
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
        int rval = QLA_SUCCESS;
        int segments, fragment;
        uint32_t *dcode, dlen;
        uint32_t risc_addr;
        uint32_t risc_size;
        uint32_t i;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];

        ql_dbg(ql_dbg_init, vha, 0x008b,
            "FW: Loading firmware from flash (%x).\n", faddr);

        rval = QLA_SUCCESS;

        segments = FA_RISC_CODE_SEGMENTS;
        dcode = (uint32_t *)req->ring;
        *srisc_addr = 0;

        /* 27xx may boot from the secondary flash image instead. */
        if (IS_QLA27XX(ha) &&
            qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
                faddr = ha->flt_region_fw_sec;

        /* Validate firmware image by checking version. */
        qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
        for (i = 0; i < 4; i++)
                dcode[i] = be32_to_cpu(dcode[i]);
        /* All-ones or all-zero version words mean erased/blank flash. */
        if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
            dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
            (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
            dcode[3] == 0)) {
                ql_log(ql_log_fatal, vha, 0x008c,
                    "Unable to verify the integrity of flash firmware "
                    "image.\n");
                ql_log(ql_log_fatal, vha, 0x008d,
                    "Firmware data: %08x %08x %08x %08x.\n",
                    dcode[0], dcode[1], dcode[2], dcode[3]);

                return QLA_FUNCTION_FAILED;
        }

        while (segments && rval == QLA_SUCCESS) {
                /* Read segment's load information. */
                qla24xx_read_flash_data(vha, dcode, faddr, 4);

                risc_addr = be32_to_cpu(dcode[2]);
                /* First segment's address becomes the FW start address. */
                *srisc_addr = *srisc_addr == 0 ?
                    risc_addr : *srisc_addr;
                risc_size = be32_to_cpu(dcode[3]);

                fragment = 0;
                while (risc_size > 0 && rval == QLA_SUCCESS) {
                        /* Fragment size is capped by the DMA staging buffer. */
                        dlen = (uint32_t)(ha->fw_transfer_size >> 2);
                        if (dlen > risc_size)
                                dlen = risc_size;

                        ql_dbg(ql_dbg_init, vha, 0x008e,
                            "Loading risc segment@ risc addr %x "
                            "number of dwords 0x%x offset 0x%x.\n",
                            risc_addr, dlen, faddr);

                        qla24xx_read_flash_data(vha, dcode, faddr, dlen);
                        /* Firmware dwords are byte-swapped before download. */
                        for (i = 0; i < dlen; i++)
                                dcode[i] = swab32(dcode[i]);

                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
                            dlen);
                        if (rval) {
                                ql_log(ql_log_fatal, vha, 0x008f,
                                    "Failed to load segment %d of firmware.\n",
                                    fragment);
                                return QLA_FUNCTION_FAILED;
                        }

                        faddr += dlen;
                        risc_addr += dlen;
                        risc_size -= dlen;
                        fragment++;
                }

                /* Next segment. */
                segments--;
        }

        /* Firmware is loaded; only 27xx also carries a fwdump template. */
        if (!IS_QLA27XX(ha))
                return rval;

        /* Drop any template from a previous load. */
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;

        ql_dbg(ql_dbg_init, vha, 0x0161,
            "Loading fwdump template from %x\n", faddr);
        qla24xx_read_flash_data(vha, dcode, faddr, 7);
        risc_size = be32_to_cpu(dcode[2]);
        ql_dbg(ql_dbg_init, vha, 0x0162,
            "-> array size %x dwords\n", risc_size);
        if (risc_size == 0 || risc_size == ~0)
                goto default_template;

        /* Template payload excludes the 8-dword header. */
        dlen = (risc_size - 8) * sizeof(*dcode);
        ql_dbg(ql_dbg_init, vha, 0x0163,
            "-> template allocating %x bytes...\n", dlen);
        ha->fw_dump_template = vmalloc(dlen);
        if (!ha->fw_dump_template) {
                ql_log(ql_log_warn, vha, 0x0164,
                    "Failed fwdump template allocate %x bytes.\n", risc_size);
                goto default_template;
        }

        faddr += 7;
        risc_size -= 8;
        dcode = ha->fw_dump_template;
        qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
        for (i = 0; i < risc_size; i++)
                dcode[i] = le32_to_cpu(dcode[i]);

        if (!qla27xx_fwdt_template_valid(dcode)) {
                ql_log(ql_log_warn, vha, 0x0165,
                    "Failed fwdump template validate\n");
                goto default_template;
        }

        dlen = qla27xx_fwdt_template_size(dcode);
        ql_dbg(ql_dbg_init, vha, 0x0166,
            "-> template size %x bytes\n", dlen);
        if (dlen > risc_size * sizeof(*dcode)) {
                ql_log(ql_log_warn, vha, 0x0167,
                    "Failed fwdump template exceeds array by %zx bytes\n",
                    (size_t)(dlen - risc_size * sizeof(*dcode)));
                goto default_template;
        }
        ha->fw_dump_template_len = dlen;
        return rval;

default_template:
        /* Fall back to the built-in template shipped with the driver. */
        ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;

        dlen = qla27xx_fwdt_template_default_size();
        ql_dbg(ql_dbg_init, vha, 0x0169,
            "-> template allocating %x bytes...\n", dlen);
        ha->fw_dump_template = vmalloc(dlen);
        if (!ha->fw_dump_template) {
                ql_log(ql_log_warn, vha, 0x016a,
                    "Failed fwdump template allocate %x bytes.\n", risc_size);
                goto failed_template;
        }

        dcode = ha->fw_dump_template;
        risc_size = dlen / sizeof(*dcode);
        memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
        for (i = 0; i < risc_size; i++)
                dcode[i] = be32_to_cpu(dcode[i]);

        if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
                ql_log(ql_log_warn, vha, 0x016b,
                    "Failed fwdump template validate\n");
                goto failed_template;
        }

        dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
        ql_dbg(ql_dbg_init, vha, 0x016c,
            "-> template size %x bytes\n", dlen);
        ha->fw_dump_template_len = dlen;
        return rval;

failed_template:
        /* Give up on fwdump templates entirely; FW itself is loaded. */
        ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;
        return rval;
}

#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/*
 * qla2x00_load_risc
 *      Load RISC firmware for pre-FWI2 (2x00) parts from a
 *      request-firmware blob, 16-bit words at a time, staged through the
 *      request ring's DMA buffer.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED; *srisc_addr receives the
 * firmware start address.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
        int rval;
        int i, fragment;
        uint16_t *wcode, *fwcode;
        uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
        struct fw_blob *blob;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];

        /* Load firmware blob. */
        blob = qla2x00_request_firmware(vha);
        if (!blob) {
                ql_log(ql_log_info, vha, 0x0083,
                    "Firmware image unavailable.\n");
                ql_log(ql_log_info, vha, 0x0084,
                    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
                return QLA_FUNCTION_FAILED;
        }

        rval = QLA_SUCCESS;

        wcode = (uint16_t *)req->ring;
        *srisc_addr = 0;
        fwcode = (uint16_t *)blob->fw->data;
        fwclen = 0;

        /* Validate firmware image by checking version. */
        if (blob->fw->size < 8 * sizeof(uint16_t)) {
                ql_log(ql_log_fatal, vha, 0x0085,
                    "Unable to verify integrity of firmware image (%zd).\n",
                    blob->fw->size);
                goto fail_fw_integrity;
        }
        for (i = 0; i < 4; i++)
                wcode[i] = be16_to_cpu(fwcode[i + 4]);
        if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
            wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
            wcode[2] == 0 && wcode[3] == 0)) {
                ql_log(ql_log_fatal, vha, 0x0086,
                    "Unable to verify integrity of firmware image.\n");
                ql_log(ql_log_fatal, vha, 0x0087,
                    "Firmware data: %04x %04x %04x %04x.\n",
                    wcode[0], wcode[1], wcode[2], wcode[3]);
                goto fail_fw_integrity;
        }

        /* blob->segs is a zero-terminated list of segment load addresses. */
        seg = blob->segs;
        while (*seg && rval == QLA_SUCCESS) {
                risc_addr = *seg;
                *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
                risc_size = be16_to_cpu(fwcode[3]);

                /* Validate firmware image size. */
                fwclen += risc_size * sizeof(uint16_t);
                if (blob->fw->size < fwclen) {
                        ql_log(ql_log_fatal, vha, 0x0088,
                            "Unable to verify integrity of firmware image "
                            "(%zd).\n", blob->fw->size);
                        goto fail_fw_integrity;
                }

                fragment = 0;
                while (risc_size > 0 && rval == QLA_SUCCESS) {
                        wlen = (uint16_t)(ha->fw_transfer_size >> 1);
                        if (wlen > risc_size)
                                wlen = risc_size;
                        ql_dbg(ql_dbg_init, vha, 0x0089,
                            "Loading risc segment@ risc addr %x number of "
                            "words 0x%x.\n", risc_addr, wlen);

                        for (i = 0; i < wlen; i++)
                                wcode[i] = swab16(fwcode[i]);

                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
                            wlen);
                        if (rval) {
                                ql_log(ql_log_fatal, vha, 0x008a,
                                    "Failed to load segment %d of firmware.\n",
                                    fragment);
                                break;
                        }

                        fwcode += wlen;
                        risc_addr += wlen;
                        risc_size -= wlen;
                        fragment++;
                }

                /* Next segment. */
                seg++;
        }
        return rval;

fail_fw_integrity:
        return QLA_FUNCTION_FAILED;
}

/*
 * qla24xx_load_risc_blob
 *      Load RISC firmware for 24xx-and-later parts from a
 *      request-firmware blob (32-bit dwords); on 27xx also (re)load the
 *      fwdump template that follows the firmware segments.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
        int rval;
        int segments, fragment;
        uint32_t *dcode, dlen;
        uint32_t risc_addr;
        uint32_t risc_size;
        uint32_t i;
        struct fw_blob *blob;
        const uint32_t *fwcode;
        uint32_t fwclen;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];

        /* Load firmware blob.
 */
        blob = qla2x00_request_firmware(vha);
        if (!blob) {
                ql_log(ql_log_warn, vha, 0x0090,
                    "Firmware image unavailable.\n");
                ql_log(ql_log_warn, vha, 0x0091,
                    "Firmware images can be retrieved from: "
                    QLA_FW_URL ".\n");

                return QLA_FUNCTION_FAILED;
        }

        ql_dbg(ql_dbg_init, vha, 0x0092,
            "FW: Loading via request-firmware.\n");

        rval = QLA_SUCCESS;

        segments = FA_RISC_CODE_SEGMENTS;
        dcode = (uint32_t *)req->ring;
        *srisc_addr = 0;
        fwcode = (uint32_t *)blob->fw->data;
        fwclen = 0;

        /* Validate firmware image by checking version. */
        if (blob->fw->size < 8 * sizeof(uint32_t)) {
                ql_log(ql_log_fatal, vha, 0x0093,
                    "Unable to verify integrity of firmware image (%zd).\n",
                    blob->fw->size);
                return QLA_FUNCTION_FAILED;
        }
        for (i = 0; i < 4; i++)
                dcode[i] = be32_to_cpu(fwcode[i + 4]);
        /* All-ones or all-zero version words mean a blank/corrupt blob. */
        if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
            dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
            (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
            dcode[3] == 0)) {
                ql_log(ql_log_fatal, vha, 0x0094,
                    "Unable to verify integrity of firmware image (%zd).\n",
                    blob->fw->size);
                ql_log(ql_log_fatal, vha, 0x0095,
                    "Firmware data: %08x %08x %08x %08x.\n",
                    dcode[0], dcode[1], dcode[2], dcode[3]);
                return QLA_FUNCTION_FAILED;
        }

        while (segments && rval == QLA_SUCCESS) {
                risc_addr = be32_to_cpu(fwcode[2]);
                /* First segment's address becomes the FW start address. */
                *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
                risc_size = be32_to_cpu(fwcode[3]);

                /* Validate firmware image size. */
                fwclen += risc_size * sizeof(uint32_t);
                if (blob->fw->size < fwclen) {
                        ql_log(ql_log_fatal, vha, 0x0096,
                            "Unable to verify integrity of firmware image "
                            "(%zd).\n", blob->fw->size);
                        return QLA_FUNCTION_FAILED;
                }

                fragment = 0;
                while (risc_size > 0 && rval == QLA_SUCCESS) {
                        /* Fragment size is capped by the DMA staging buffer. */
                        dlen = (uint32_t)(ha->fw_transfer_size >> 2);
                        if (dlen > risc_size)
                                dlen = risc_size;

                        ql_dbg(ql_dbg_init, vha, 0x0097,
                            "Loading risc segment@ risc addr %x "
                            "number of dwords 0x%x.\n", risc_addr, dlen);

                        for (i = 0; i < dlen; i++)
                                dcode[i] = swab32(fwcode[i]);

                        rval = qla2x00_load_ram(vha, req->dma, risc_addr,
                            dlen);
                        if (rval) {
                                ql_log(ql_log_fatal, vha, 0x0098,
                                    "Failed to load segment %d of firmware.\n",
                                    fragment);
                                return QLA_FUNCTION_FAILED;
                        }

                        fwcode += dlen;
                        risc_addr += dlen;
                        risc_size -= dlen;
                        fragment++;
                }

                /* Next segment. */
                segments--;
        }

        /* Firmware is loaded; only 27xx also carries a fwdump template. */
        if (!IS_QLA27XX(ha))
                return rval;

        /* Drop any template from a previous load. */
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;

        ql_dbg(ql_dbg_init, vha, 0x171,
            "Loading fwdump template from %x\n",
            (uint32_t)((void *)fwcode - (void *)blob->fw->data));
        risc_size = be32_to_cpu(fwcode[2]);
        ql_dbg(ql_dbg_init, vha, 0x172,
            "-> array size %x dwords\n", risc_size);
        if (risc_size == 0 || risc_size == ~0)
                goto default_template;

        /* Template payload excludes the 8-dword header. */
        dlen = (risc_size - 8) * sizeof(*fwcode);
        ql_dbg(ql_dbg_init, vha, 0x0173,
            "-> template allocating %x bytes...\n", dlen);
        ha->fw_dump_template = vmalloc(dlen);
        if (!ha->fw_dump_template) {
                ql_log(ql_log_warn, vha, 0x0174,
                    "Failed fwdump template allocate %x bytes.\n", risc_size);
                goto default_template;
        }

        fwcode += 7;
        risc_size -= 8;
        dcode = ha->fw_dump_template;
        for (i = 0; i < risc_size; i++)
                dcode[i] = le32_to_cpu(fwcode[i]);

        if (!qla27xx_fwdt_template_valid(dcode)) {
                ql_log(ql_log_warn, vha, 0x0175,
                    "Failed fwdump template validate\n");
                goto default_template;
        }

        dlen = qla27xx_fwdt_template_size(dcode);
        ql_dbg(ql_dbg_init, vha, 0x0176,
            "-> template size %x bytes\n", dlen);
        if (dlen > risc_size * sizeof(*fwcode)) {
                ql_log(ql_log_warn, vha, 0x0177,
                    "Failed fwdump template exceeds array by %zx bytes\n",
                    (size_t)(dlen - risc_size * sizeof(*fwcode)));
                goto default_template;
        }
        ha->fw_dump_template_len = dlen;
        return rval;

default_template:
        /* Fall back to the built-in template shipped with the driver. */
        ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;

        dlen = qla27xx_fwdt_template_default_size();
        ql_dbg(ql_dbg_init, vha, 0x0179,
            "-> template allocating %x bytes...\n", dlen);
        ha->fw_dump_template = vmalloc(dlen);
        if (!ha->fw_dump_template) {
                ql_log(ql_log_warn, vha, 0x017a,
                    "Failed fwdump template allocate %x bytes.\n", risc_size);
                goto failed_template;
        }

        dcode = ha->fw_dump_template;
        risc_size = dlen / sizeof(*fwcode);
        fwcode = qla27xx_fwdt_template_default();
        for (i = 0; i < risc_size; i++)
                dcode[i] = be32_to_cpu(fwcode[i]);

        if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
                ql_log(ql_log_warn, vha, 0x017b,
                    "Failed fwdump template validate\n");
                goto failed_template;
        }

        dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
        ql_dbg(ql_dbg_init, vha, 0x017c,
            "-> template size %x bytes\n", dlen);
        ha->fw_dump_template_len = dlen;
        return rval;

failed_template:
        /* Give up on fwdump templates entirely; FW itself is loaded. */
        ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
        if (ha->fw_dump_template)
                vfree(ha->fw_dump_template);
        ha->fw_dump_template = NULL;
        ha->fw_dump_template_len = 0;
        return rval;
}

/*
 * qla24xx_load_risc
 *      24xx firmware load entry point.  Prefers the request-firmware
 *      blob and falls back to the flash image; ql2xfwloadbin == 1
 *      redirects to the 81xx (flash-first) policy.
 */
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
        int rval;

        if (ql2xfwloadbin == 1)
                return qla81xx_load_risc(vha, srisc_addr);

        /*
         * FW Load priority:
         * 1) Firmware via request-firmware interface (.bin file).
         * 2) Firmware residing in flash.
         */
        rval = qla24xx_load_risc_blob(vha, srisc_addr);
        if (rval == QLA_SUCCESS)
                return rval;

        return qla24xx_load_risc_flash(vha, srisc_addr,
            vha->hw->flt_region_fw);
}

/*
 * qla81xx_load_risc
 *      81xx-and-later firmware load entry point: flash first, then the
 *      request-firmware blob, and finally the "golden" recovery image
 *      in flash (limited operation); ql2xfwloadbin == 2 skips straight
 *      to the blob.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
        int rval;
        struct qla_hw_data *ha = vha->hw;

        if (ql2xfwloadbin == 2)
                goto try_blob_fw;

        /*
         * FW Load priority:
         * 1) Firmware residing in flash.
         * 2) Firmware via request-firmware interface (.bin file).
         * 3) Golden-Firmware residing in flash -- limited operation.
         */
        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
        if (rval == QLA_SUCCESS)
                return rval;

try_blob_fw:
        rval = qla24xx_load_risc_blob(vha, srisc_addr);
        if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
                return rval;

        ql_log(ql_log_info, vha, 0x0099,
            "Attempting to fallback to golden firmware.\n");
        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
        if (rval != QLA_SUCCESS)
                return rval;

        /* Running the golden image; operational FW should be updated. */
        ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
        ha->flags.running_gold_fw = 1;
        return rval;
}

/*
 * qla2x00_try_to_stop_firmware
 *      Best-effort firmware stop during teardown; retries a few times,
 *      resetting and re-initializing the chip between attempts.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
        int ret, retries;
        struct qla_hw_data *ha = vha->hw;

        /* Nothing to stop if the chip is unreachable or FW never ran. */
        if (ha->flags.pci_channel_io_perm_failure)
                return;
        if (!IS_FWI2_CAPABLE(ha))
                return;
        if (!ha->fw_major_version)
                return;
        if (!ha->flags.fw_started)
                return;

        ret = qla2x00_stop_firmware(vha);
        for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
            ret != QLA_INVALID_COMMAND && retries ; retries--) {
                ha->isp_ops->reset_chip(vha);
                if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
                        continue;
                if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
                        continue;
                ql_log(ql_log_info, vha, 0x8015,
                    "Attempting retry of stop-firmware command.\n");
                ret = qla2x00_stop_firmware(vha);
        }

        QLA_FW_STOPPED(ha);
        ha->flags.fw_init_done = 0;
}

/*
 * qla24xx_configure_vhba
 *      Bring a virtual port (NPIV vha) online: wait for FW readiness on
 *      the base port, log into the SNS, and kick off a loop resync.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED, or -EINVAL when called on
 * the physical (non-NPIV) port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
        int rval = QLA_SUCCESS;
        int rval2;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct req_que *req;
        struct rsp_que *rsp;

        if (!vha->vp_idx)
                return -EINVAL;

        rval = qla2x00_fw_ready(base_vha);
        if (vha->qpair)
                req = vha->qpair->req;
        else
                req = ha->req_q_map[0];
        rsp = req->rsp;

        if (rval == QLA_SUCCESS) {
                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
                qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
        }

        vha->flags.management_server_logged_in = 0;

        /* Login to SNS first */
        rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
            BIT_1);
        if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
                if (rval2 == QLA_MEMORY_ALLOC_FAILED)
                        ql_dbg(ql_dbg_init, vha, 0x0120,
                            "Failed SNS login: loop_id=%x, rval2=%d\n",
                            NPH_SNS, rval2);
                else
                        ql_dbg(ql_dbg_init, vha, 0x0103,
                            "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
                            "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
                            NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
                return (QLA_FUNCTION_FAILED);
        }

        atomic_set(&vha->loop_down_timer, 0);
        atomic_set(&vha->loop_state, LOOP_UP);
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        rval = qla2x00_loop_resync(base_vha);

        return rval;
}

/* 84XX Support **************************************************************/

/* Driver-wide registry of shared 84xx chip state, one entry per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

/*
 * qla84xx_get_chip
 *      Look up (or allocate) the refcounted 84xx chip state shared by
 *      all functions on this adapter's PCI bus.  Returns NULL on
 *      allocation failure.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
        struct qla_chip_state_84xx *cs84xx;
        struct qla_hw_data *ha = vha->hw;

        mutex_lock(&qla_cs84xx_mutex);

        /* Find any shared 84xx chip.
*/ 7995 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { 7996 if (cs84xx->bus == ha->pdev->bus) { 7997 kref_get(&cs84xx->kref); 7998 goto done; 7999 } 8000 } 8001 8002 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); 8003 if (!cs84xx) 8004 goto done; 8005 8006 kref_init(&cs84xx->kref); 8007 spin_lock_init(&cs84xx->access_lock); 8008 mutex_init(&cs84xx->fw_update_mutex); 8009 cs84xx->bus = ha->pdev->bus; 8010 8011 list_add_tail(&cs84xx->list, &qla_cs84xx_list); 8012 done: 8013 mutex_unlock(&qla_cs84xx_mutex); 8014 return cs84xx; 8015 } 8016 8017 static void 8018 __qla84xx_chip_release(struct kref *kref) 8019 { 8020 struct qla_chip_state_84xx *cs84xx = 8021 container_of(kref, struct qla_chip_state_84xx, kref); 8022 8023 mutex_lock(&qla_cs84xx_mutex); 8024 list_del(&cs84xx->list); 8025 mutex_unlock(&qla_cs84xx_mutex); 8026 kfree(cs84xx); 8027 } 8028 8029 void 8030 qla84xx_put_chip(struct scsi_qla_host *vha) 8031 { 8032 struct qla_hw_data *ha = vha->hw; 8033 if (ha->cs84xx) 8034 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 8035 } 8036 8037 static int 8038 qla84xx_init_chip(scsi_qla_host_t *vha) 8039 { 8040 int rval; 8041 uint16_t status[2]; 8042 struct qla_hw_data *ha = vha->hw; 8043 8044 mutex_lock(&ha->cs84xx->fw_update_mutex); 8045 8046 rval = qla84xx_verify_chip(vha, status); 8047 8048 mutex_unlock(&ha->cs84xx->fw_update_mutex); 8049 8050 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 8051 QLA_SUCCESS; 8052 } 8053 8054 /* 81XX Support **************************************************************/ 8055 8056 int 8057 qla81xx_nvram_config(scsi_qla_host_t *vha) 8058 { 8059 int rval; 8060 struct init_cb_81xx *icb; 8061 struct nvram_81xx *nv; 8062 uint32_t *dptr; 8063 uint8_t *dptr1, *dptr2; 8064 uint32_t chksum; 8065 uint16_t cnt; 8066 struct qla_hw_data *ha = vha->hw; 8067 8068 rval = QLA_SUCCESS; 8069 icb = (struct init_cb_81xx *)ha->init_cb; 8070 nv = ha->nvram; 8071 8072 /* Determine NVRAM starting address. 
*/ 8073 ha->nvram_size = sizeof(struct nvram_81xx); 8074 ha->vpd_size = FA_NVRAM_VPD_SIZE; 8075 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 8076 ha->vpd_size = FA_VPD_SIZE_82XX; 8077 8078 /* Get VPD data into cache */ 8079 ha->vpd = ha->nvram + VPD_OFFSET; 8080 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 8081 ha->vpd_size); 8082 8083 /* Get NVRAM data into cache and calculate checksum. */ 8084 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, 8085 ha->nvram_size); 8086 dptr = (uint32_t *)nv; 8087 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 8088 chksum += le32_to_cpu(*dptr); 8089 8090 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, 8091 "Contents of NVRAM:\n"); 8092 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, 8093 (uint8_t *)nv, ha->nvram_size); 8094 8095 /* Bad NVRAM data, set defaults parameters. */ 8096 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 8097 || nv->id[3] != ' ' || 8098 nv->nvram_version < cpu_to_le16(ICB_VERSION)) { 8099 /* Reset NVRAM data. */ 8100 ql_log(ql_log_info, vha, 0x0073, 8101 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 8102 "version=0x%x.\n", chksum, nv->id[0], 8103 le16_to_cpu(nv->nvram_version)); 8104 ql_log(ql_log_info, vha, 0x0074, 8105 "Falling back to functioning (yet invalid -- WWPN) " 8106 "defaults.\n"); 8107 8108 /* 8109 * Set default initialization control block. 
8110 */ 8111 memset(nv, 0, ha->nvram_size); 8112 nv->nvram_version = cpu_to_le16(ICB_VERSION); 8113 nv->version = cpu_to_le16(ICB_VERSION); 8114 nv->frame_payload_size = 2048; 8115 nv->execution_throttle = cpu_to_le16(0xFFFF); 8116 nv->exchange_count = cpu_to_le16(0); 8117 nv->port_name[0] = 0x21; 8118 nv->port_name[1] = 0x00 + ha->port_no + 1; 8119 nv->port_name[2] = 0x00; 8120 nv->port_name[3] = 0xe0; 8121 nv->port_name[4] = 0x8b; 8122 nv->port_name[5] = 0x1c; 8123 nv->port_name[6] = 0x55; 8124 nv->port_name[7] = 0x86; 8125 nv->node_name[0] = 0x20; 8126 nv->node_name[1] = 0x00; 8127 nv->node_name[2] = 0x00; 8128 nv->node_name[3] = 0xe0; 8129 nv->node_name[4] = 0x8b; 8130 nv->node_name[5] = 0x1c; 8131 nv->node_name[6] = 0x55; 8132 nv->node_name[7] = 0x86; 8133 nv->login_retry_count = cpu_to_le16(8); 8134 nv->interrupt_delay_timer = cpu_to_le16(0); 8135 nv->login_timeout = cpu_to_le16(0); 8136 nv->firmware_options_1 = 8137 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 8138 nv->firmware_options_2 = cpu_to_le32(2 << 4); 8139 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 8140 nv->firmware_options_3 = cpu_to_le32(2 << 13); 8141 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 8142 nv->efi_parameters = cpu_to_le32(0); 8143 nv->reset_delay = 5; 8144 nv->max_luns_per_target = cpu_to_le16(128); 8145 nv->port_down_retry_count = cpu_to_le16(30); 8146 nv->link_down_timeout = cpu_to_le16(180); 8147 nv->enode_mac[0] = 0x00; 8148 nv->enode_mac[1] = 0xC0; 8149 nv->enode_mac[2] = 0xDD; 8150 nv->enode_mac[3] = 0x04; 8151 nv->enode_mac[4] = 0x05; 8152 nv->enode_mac[5] = 0x06 + ha->port_no + 1; 8153 8154 rval = 1; 8155 } 8156 8157 if (IS_T10_PI_CAPABLE(ha)) 8158 nv->frame_payload_size &= ~7; 8159 8160 qlt_81xx_config_nvram_stage1(vha, nv); 8161 8162 /* Reset Initialization control block */ 8163 memset(icb, 0, ha->init_cb_size); 8164 8165 /* Copy 1st segment. 
*/ 8166 dptr1 = (uint8_t *)icb; 8167 dptr2 = (uint8_t *)&nv->version; 8168 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 8169 while (cnt--) 8170 *dptr1++ = *dptr2++; 8171 8172 icb->login_retry_count = nv->login_retry_count; 8173 8174 /* Copy 2nd segment. */ 8175 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 8176 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 8177 cnt = (uint8_t *)&icb->reserved_5 - 8178 (uint8_t *)&icb->interrupt_delay_timer; 8179 while (cnt--) 8180 *dptr1++ = *dptr2++; 8181 8182 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 8183 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 8184 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 8185 icb->enode_mac[0] = 0x00; 8186 icb->enode_mac[1] = 0xC0; 8187 icb->enode_mac[2] = 0xDD; 8188 icb->enode_mac[3] = 0x04; 8189 icb->enode_mac[4] = 0x05; 8190 icb->enode_mac[5] = 0x06 + ha->port_no + 1; 8191 } 8192 8193 /* Use extended-initialization control block. */ 8194 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 8195 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); 8196 /* 8197 * Setup driver NVRAM options. 8198 */ 8199 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 8200 "QLE8XXX"); 8201 8202 qlt_81xx_config_nvram_stage2(vha, icb); 8203 8204 /* Use alternate WWN? */ 8205 if (nv->host_p & cpu_to_le32(BIT_15)) { 8206 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 8207 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 8208 } 8209 8210 /* Prepare nodename */ 8211 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 8212 /* 8213 * Firmware will apply the following mask if the nodename was 8214 * not provided. 8215 */ 8216 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 8217 icb->node_name[0] &= 0xF0; 8218 } 8219 8220 /* Set host adapter parameters. 
*/ 8221 ha->flags.disable_risc_code_load = 0; 8222 ha->flags.enable_lip_reset = 0; 8223 ha->flags.enable_lip_full_login = 8224 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 8225 ha->flags.enable_target_reset = 8226 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 8227 ha->flags.enable_led_scheme = 0; 8228 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 8229 8230 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 8231 (BIT_6 | BIT_5 | BIT_4)) >> 4; 8232 8233 /* save HBA serial number */ 8234 ha->serial0 = icb->port_name[5]; 8235 ha->serial1 = icb->port_name[6]; 8236 ha->serial2 = icb->port_name[7]; 8237 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 8238 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 8239 8240 icb->execution_throttle = cpu_to_le16(0xFFFF); 8241 8242 ha->retry_count = le16_to_cpu(nv->login_retry_count); 8243 8244 /* Set minimum login_timeout to 4 seconds. */ 8245 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 8246 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 8247 if (le16_to_cpu(nv->login_timeout) < 4) 8248 nv->login_timeout = cpu_to_le16(4); 8249 ha->login_timeout = le16_to_cpu(nv->login_timeout); 8250 8251 /* Set minimum RATOV to 100 tenths of a second. */ 8252 ha->r_a_tov = 100; 8253 8254 ha->loop_reset_delay = nv->reset_delay; 8255 8256 /* Link Down Timeout = 0: 8257 * 8258 * When Port Down timer expires we will start returning 8259 * I/O's to OS with "DID_NO_CONNECT". 8260 * 8261 * Link Down Timeout != 0: 8262 * 8263 * The driver waits for the link to come up after link down 8264 * before returning I/Os to OS with "DID_NO_CONNECT". 8265 */ 8266 if (le16_to_cpu(nv->link_down_timeout) == 0) { 8267 ha->loop_down_abort_time = 8268 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 8269 } else { 8270 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 8271 ha->loop_down_abort_time = 8272 (LOOP_DOWN_TIME - ha->link_down_timeout); 8273 } 8274 8275 /* Need enough time to try and get the port back. 
*/ 8276 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 8277 if (qlport_down_retry) 8278 ha->port_down_retry_count = qlport_down_retry; 8279 8280 /* Set login_retry_count */ 8281 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 8282 if (ha->port_down_retry_count == 8283 le16_to_cpu(nv->port_down_retry_count) && 8284 ha->port_down_retry_count > 3) 8285 ha->login_retry_count = ha->port_down_retry_count; 8286 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 8287 ha->login_retry_count = ha->port_down_retry_count; 8288 if (ql2xloginretrycount) 8289 ha->login_retry_count = ql2xloginretrycount; 8290 8291 /* if not running MSI-X we need handshaking on interrupts */ 8292 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) 8293 icb->firmware_options_2 |= cpu_to_le32(BIT_22); 8294 8295 /* Enable ZIO. */ 8296 if (!vha->flags.init_done) { 8297 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 8298 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 8299 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
8300 le16_to_cpu(icb->interrupt_delay_timer): 2; 8301 } 8302 icb->firmware_options_2 &= cpu_to_le32( 8303 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 8304 vha->flags.process_response_queue = 0; 8305 if (ha->zio_mode != QLA_ZIO_DISABLED) { 8306 ha->zio_mode = QLA_ZIO_MODE_6; 8307 8308 ql_log(ql_log_info, vha, 0x0075, 8309 "ZIO mode %d enabled; timer delay (%d us).\n", 8310 ha->zio_mode, 8311 ha->zio_timer * 100); 8312 8313 icb->firmware_options_2 |= cpu_to_le32( 8314 (uint32_t)ha->zio_mode); 8315 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 8316 vha->flags.process_response_queue = 1; 8317 } 8318 8319 /* enable RIDA Format2 */ 8320 icb->firmware_options_3 |= BIT_0; 8321 8322 /* N2N: driver will initiate Login instead of FW */ 8323 icb->firmware_options_3 |= BIT_8; 8324 8325 if (IS_QLA27XX(ha)) { 8326 icb->firmware_options_3 |= BIT_8; 8327 ql_dbg(ql_log_info, vha, 0x0075, 8328 "Enabling direct connection.\n"); 8329 } 8330 8331 if (rval) { 8332 ql_log(ql_log_warn, vha, 0x0076, 8333 "NVRAM configuration failed.\n"); 8334 } 8335 return (rval); 8336 } 8337 8338 int 8339 qla82xx_restart_isp(scsi_qla_host_t *vha) 8340 { 8341 int status, rval; 8342 struct qla_hw_data *ha = vha->hw; 8343 struct req_que *req = ha->req_q_map[0]; 8344 struct rsp_que *rsp = ha->rsp_q_map[0]; 8345 struct scsi_qla_host *vp; 8346 unsigned long flags; 8347 8348 status = qla2x00_init_rings(vha); 8349 if (!status) { 8350 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 8351 ha->flags.chip_reset_done = 1; 8352 8353 status = qla2x00_fw_ready(vha); 8354 if (!status) { 8355 /* Issue a marker after FW becomes ready. 
*/ 8356 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 8357 vha->flags.online = 1; 8358 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 8359 } 8360 8361 /* if no cable then assume it's good */ 8362 if ((vha->device_flags & DFLG_NO_CABLE)) 8363 status = 0; 8364 } 8365 8366 if (!status) { 8367 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 8368 8369 if (!atomic_read(&vha->loop_down_timer)) { 8370 /* 8371 * Issue marker command only when we are going 8372 * to start the I/O . 8373 */ 8374 vha->marker_needed = 1; 8375 } 8376 8377 ha->isp_ops->enable_intrs(ha); 8378 8379 ha->isp_abort_cnt = 0; 8380 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 8381 8382 /* Update the firmware version */ 8383 status = qla82xx_check_md_needed(vha); 8384 8385 if (ha->fce) { 8386 ha->flags.fce_enabled = 1; 8387 memset(ha->fce, 0, 8388 fce_calc_size(ha->fce_bufs)); 8389 rval = qla2x00_enable_fce_trace(vha, 8390 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 8391 &ha->fce_bufs); 8392 if (rval) { 8393 ql_log(ql_log_warn, vha, 0x8001, 8394 "Unable to reinitialize FCE (%d).\n", 8395 rval); 8396 ha->flags.fce_enabled = 0; 8397 } 8398 } 8399 8400 if (ha->eft) { 8401 memset(ha->eft, 0, EFT_SIZE); 8402 rval = qla2x00_enable_eft_trace(vha, 8403 ha->eft_dma, EFT_NUM_BUFFERS); 8404 if (rval) { 8405 ql_log(ql_log_warn, vha, 0x8010, 8406 "Unable to reinitialize EFT (%d).\n", 8407 rval); 8408 } 8409 } 8410 } 8411 8412 if (!status) { 8413 ql_dbg(ql_dbg_taskm, vha, 0x8011, 8414 "qla82xx_restart_isp succeeded.\n"); 8415 8416 spin_lock_irqsave(&ha->vport_slock, flags); 8417 list_for_each_entry(vp, &ha->vp_list, list) { 8418 if (vp->vp_idx) { 8419 atomic_inc(&vp->vref_count); 8420 spin_unlock_irqrestore(&ha->vport_slock, flags); 8421 8422 qla2x00_vp_abort_isp(vp); 8423 8424 spin_lock_irqsave(&ha->vport_slock, flags); 8425 atomic_dec(&vp->vref_count); 8426 } 8427 } 8428 spin_unlock_irqrestore(&ha->vport_slock, flags); 8429 8430 } else { 8431 ql_log(ql_log_warn, vha, 0x8016, 8432 "qla82xx_restart_isp **** FAILED 
****.\n"); 8433 } 8434 8435 return status; 8436 } 8437 8438 void 8439 qla81xx_update_fw_options(scsi_qla_host_t *vha) 8440 { 8441 struct qla_hw_data *ha = vha->hw; 8442 8443 /* Hold status IOCBs until ABTS response received. */ 8444 if (ql2xfwholdabts) 8445 ha->fw_options[3] |= BIT_12; 8446 8447 /* Set Retry FLOGI in case of P2P connection */ 8448 if (ha->operating_mode == P2P) { 8449 ha->fw_options[2] |= BIT_3; 8450 ql_dbg(ql_dbg_disc, vha, 0x2103, 8451 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", 8452 __func__, ha->fw_options[2]); 8453 } 8454 8455 /* Move PUREX, ABTS RX & RIDA to ATIOQ */ 8456 if (ql2xmvasynctoatio) { 8457 if (qla_tgt_mode_enabled(vha) || 8458 qla_dual_mode_enabled(vha)) 8459 ha->fw_options[2] |= BIT_11; 8460 else 8461 ha->fw_options[2] &= ~BIT_11; 8462 } 8463 8464 if (qla_tgt_mode_enabled(vha) || 8465 qla_dual_mode_enabled(vha)) { 8466 /* FW auto send SCSI status during */ 8467 ha->fw_options[1] |= BIT_8; 8468 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8; 8469 8470 /* FW perform Exchange validation */ 8471 ha->fw_options[2] |= BIT_4; 8472 } else { 8473 ha->fw_options[1] &= ~BIT_8; 8474 ha->fw_options[10] &= 0x00ff; 8475 8476 ha->fw_options[2] &= ~BIT_4; 8477 } 8478 8479 if (ql2xetsenable) { 8480 /* Enable ETS Burst. */ 8481 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 8482 ha->fw_options[2] |= BIT_9; 8483 } 8484 8485 ql_dbg(ql_dbg_init, vha, 0x00e9, 8486 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", 8487 __func__, ha->fw_options[1], ha->fw_options[2], 8488 ha->fw_options[3], vha->host->active_mode); 8489 8490 qla2x00_set_fw_options(vha, ha->fw_options); 8491 } 8492 8493 /* 8494 * qla24xx_get_fcp_prio 8495 * Gets the fcp cmd priority value for the logged in port. 8496 * Looks for a match of the port descriptors within 8497 * each of the fcp prio config entries. If a match is found, 8498 * the tag (priority) value is returned. 8499 * 8500 * Input: 8501 * vha = scsi host structure pointer. 
8502 * fcport = port structure pointer. 8503 * 8504 * Return: 8505 * non-zero (if found) 8506 * -1 (if not found) 8507 * 8508 * Context: 8509 * Kernel context 8510 */ 8511 static int 8512 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 8513 { 8514 int i, entries; 8515 uint8_t pid_match, wwn_match; 8516 int priority; 8517 uint32_t pid1, pid2; 8518 uint64_t wwn1, wwn2; 8519 struct qla_fcp_prio_entry *pri_entry; 8520 struct qla_hw_data *ha = vha->hw; 8521 8522 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) 8523 return -1; 8524 8525 priority = -1; 8526 entries = ha->fcp_prio_cfg->num_entries; 8527 pri_entry = &ha->fcp_prio_cfg->entry[0]; 8528 8529 for (i = 0; i < entries; i++) { 8530 pid_match = wwn_match = 0; 8531 8532 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { 8533 pri_entry++; 8534 continue; 8535 } 8536 8537 /* check source pid for a match */ 8538 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { 8539 pid1 = pri_entry->src_pid & INVALID_PORT_ID; 8540 pid2 = vha->d_id.b24 & INVALID_PORT_ID; 8541 if (pid1 == INVALID_PORT_ID) 8542 pid_match++; 8543 else if (pid1 == pid2) 8544 pid_match++; 8545 } 8546 8547 /* check destination pid for a match */ 8548 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { 8549 pid1 = pri_entry->dst_pid & INVALID_PORT_ID; 8550 pid2 = fcport->d_id.b24 & INVALID_PORT_ID; 8551 if (pid1 == INVALID_PORT_ID) 8552 pid_match++; 8553 else if (pid1 == pid2) 8554 pid_match++; 8555 } 8556 8557 /* check source WWN for a match */ 8558 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { 8559 wwn1 = wwn_to_u64(vha->port_name); 8560 wwn2 = wwn_to_u64(pri_entry->src_wwpn); 8561 if (wwn2 == (uint64_t)-1) 8562 wwn_match++; 8563 else if (wwn1 == wwn2) 8564 wwn_match++; 8565 } 8566 8567 /* check destination WWN for a match */ 8568 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { 8569 wwn1 = wwn_to_u64(fcport->port_name); 8570 wwn2 = wwn_to_u64(pri_entry->dst_wwpn); 8571 if (wwn2 == (uint64_t)-1) 8572 wwn_match++; 8573 else if 
(wwn1 == wwn2) 8574 wwn_match++; 8575 } 8576 8577 if (pid_match == 2 || wwn_match == 2) { 8578 /* Found a matching entry */ 8579 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 8580 priority = pri_entry->tag; 8581 break; 8582 } 8583 8584 pri_entry++; 8585 } 8586 8587 return priority; 8588 } 8589 8590 /* 8591 * qla24xx_update_fcport_fcp_prio 8592 * Activates fcp priority for the logged in fc port 8593 * 8594 * Input: 8595 * vha = scsi host structure pointer. 8596 * fcp = port structure pointer. 8597 * 8598 * Return: 8599 * QLA_SUCCESS or QLA_FUNCTION_FAILED 8600 * 8601 * Context: 8602 * Kernel context. 8603 */ 8604 int 8605 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 8606 { 8607 int ret; 8608 int priority; 8609 uint16_t mb[5]; 8610 8611 if (fcport->port_type != FCT_TARGET || 8612 fcport->loop_id == FC_NO_LOOP_ID) 8613 return QLA_FUNCTION_FAILED; 8614 8615 priority = qla24xx_get_fcp_prio(vha, fcport); 8616 if (priority < 0) 8617 return QLA_FUNCTION_FAILED; 8618 8619 if (IS_P3P_TYPE(vha->hw)) { 8620 fcport->fcp_prio = priority & 0xf; 8621 return QLA_SUCCESS; 8622 } 8623 8624 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 8625 if (ret == QLA_SUCCESS) { 8626 if (fcport->fcp_prio != priority) 8627 ql_dbg(ql_dbg_user, vha, 0x709e, 8628 "Updated FCP_CMND priority - value=%d loop_id=%d " 8629 "port_id=%02x%02x%02x.\n", priority, 8630 fcport->loop_id, fcport->d_id.b.domain, 8631 fcport->d_id.b.area, fcport->d_id.b.al_pa); 8632 fcport->fcp_prio = priority & 0xf; 8633 } else 8634 ql_dbg(ql_dbg_user, vha, 0x704f, 8635 "Unable to update FCP_CMND priority - ret=0x%x for " 8636 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, 8637 fcport->d_id.b.domain, fcport->d_id.b.area, 8638 fcport->d_id.b.al_pa); 8639 return ret; 8640 } 8641 8642 /* 8643 * qla24xx_update_all_fcp_prio 8644 * Activates fcp priority for all the logged in ports 8645 * 8646 * Input: 8647 * ha = adapter block pointer. 
8648 * 8649 * Return: 8650 * QLA_SUCCESS or QLA_FUNCTION_FAILED 8651 * 8652 * Context: 8653 * Kernel context. 8654 */ 8655 int 8656 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) 8657 { 8658 int ret; 8659 fc_port_t *fcport; 8660 8661 ret = QLA_FUNCTION_FAILED; 8662 /* We need to set priority for all logged in ports */ 8663 list_for_each_entry(fcport, &vha->vp_fcports, list) 8664 ret = qla24xx_update_fcport_fcp_prio(vha, fcport); 8665 8666 return ret; 8667 } 8668 8669 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, 8670 int vp_idx, bool startqp) 8671 { 8672 int rsp_id = 0; 8673 int req_id = 0; 8674 int i; 8675 struct qla_hw_data *ha = vha->hw; 8676 uint16_t qpair_id = 0; 8677 struct qla_qpair *qpair = NULL; 8678 struct qla_msix_entry *msix; 8679 8680 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { 8681 ql_log(ql_log_warn, vha, 0x00181, 8682 "FW/Driver is not multi-queue capable.\n"); 8683 return NULL; 8684 } 8685 8686 if (ql2xmqsupport || ql2xnvmeenable) { 8687 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 8688 if (qpair == NULL) { 8689 ql_log(ql_log_warn, vha, 0x0182, 8690 "Failed to allocate memory for queue pair.\n"); 8691 return NULL; 8692 } 8693 memset(qpair, 0, sizeof(struct qla_qpair)); 8694 8695 qpair->hw = vha->hw; 8696 qpair->vha = vha; 8697 qpair->qp_lock_ptr = &qpair->qp_lock; 8698 spin_lock_init(&qpair->qp_lock); 8699 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; 8700 8701 /* Assign available que pair id */ 8702 mutex_lock(&ha->mq_lock); 8703 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 8704 if (ha->num_qpairs >= ha->max_qpairs) { 8705 mutex_unlock(&ha->mq_lock); 8706 ql_log(ql_log_warn, vha, 0x0183, 8707 "No resources to create additional q pair.\n"); 8708 goto fail_qid_map; 8709 } 8710 ha->num_qpairs++; 8711 set_bit(qpair_id, ha->qpair_qid_map); 8712 ha->queue_pair_map[qpair_id] = qpair; 8713 qpair->id = qpair_id; 8714 qpair->vp_idx = vp_idx; 8715 qpair->fw_started = ha->flags.fw_started; 8716 INIT_LIST_HEAD(&qpair->hints_list); 8717 qpair->chip_reset = ha->base_qpair->chip_reset; 8718 qpair->enable_class_2 = ha->base_qpair->enable_class_2; 8719 qpair->enable_explicit_conf = 8720 ha->base_qpair->enable_explicit_conf; 8721 8722 for (i = 0; i < ha->msix_count; i++) { 8723 msix = &ha->msix_entries[i]; 8724 if (msix->in_use) 8725 continue; 8726 qpair->msix = msix; 8727 ql_dbg(ql_dbg_multiq, vha, 0xc00f, 8728 "Vector %x selected for qpair\n", msix->vector); 8729 break; 8730 } 8731 if (!qpair->msix) { 8732 ql_log(ql_log_warn, vha, 0x0184, 8733 "Out of MSI-X vectors!.\n"); 8734 goto fail_msix; 8735 } 8736 8737 qpair->msix->in_use = 1; 8738 list_add_tail(&qpair->qp_list_elem, &vha->qp_list); 8739 qpair->pdev = ha->pdev; 8740 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) 8741 qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 8742 8743 mutex_unlock(&ha->mq_lock); 8744 8745 /* Create response queue first */ 8746 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); 8747 if (!rsp_id) { 8748 ql_log(ql_log_warn, vha, 0x0185, 8749 "Failed to create response queue.\n"); 8750 goto fail_rsp; 8751 } 8752 8753 qpair->rsp = ha->rsp_q_map[rsp_id]; 8754 8755 /* Create request queue */ 8756 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, 8757 startqp); 8758 if (!req_id) { 8759 ql_log(ql_log_warn, vha, 0x0186, 8760 "Failed to create request queue.\n"); 8761 goto fail_req; 8762 } 8763 8764 qpair->req = 
ha->req_q_map[req_id]; 8765 qpair->rsp->req = qpair->req; 8766 qpair->rsp->qpair = qpair; 8767 /* init qpair to this cpu. Will adjust at run time. */ 8768 qla_cpu_update(qpair, smp_processor_id()); 8769 8770 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 8771 if (ha->fw_attributes & BIT_4) 8772 qpair->difdix_supported = 1; 8773 } 8774 8775 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 8776 if (!qpair->srb_mempool) { 8777 ql_log(ql_log_warn, vha, 0xd036, 8778 "Failed to create srb mempool for qpair %d\n", 8779 qpair->id); 8780 goto fail_mempool; 8781 } 8782 8783 /* Mark as online */ 8784 qpair->online = 1; 8785 8786 if (!vha->flags.qpairs_available) 8787 vha->flags.qpairs_available = 1; 8788 8789 ql_dbg(ql_dbg_multiq, vha, 0xc00d, 8790 "Request/Response queue pair created, id %d\n", 8791 qpair->id); 8792 ql_dbg(ql_dbg_init, vha, 0x0187, 8793 "Request/Response queue pair created, id %d\n", 8794 qpair->id); 8795 } 8796 return qpair; 8797 8798 fail_mempool: 8799 fail_req: 8800 qla25xx_delete_rsp_que(vha, qpair->rsp); 8801 fail_rsp: 8802 mutex_lock(&ha->mq_lock); 8803 qpair->msix->in_use = 0; 8804 list_del(&qpair->qp_list_elem); 8805 if (list_empty(&vha->qp_list)) 8806 vha->flags.qpairs_available = 0; 8807 fail_msix: 8808 ha->queue_pair_map[qpair_id] = NULL; 8809 clear_bit(qpair_id, ha->qpair_qid_map); 8810 ha->num_qpairs--; 8811 mutex_unlock(&ha->mq_lock); 8812 fail_qid_map: 8813 kfree(qpair); 8814 return NULL; 8815 } 8816 8817 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) 8818 { 8819 int ret = QLA_FUNCTION_FAILED; 8820 struct qla_hw_data *ha = qpair->hw; 8821 8822 qpair->delete_in_progress = 1; 8823 while (atomic_read(&qpair->ref_count)) 8824 msleep(500); 8825 8826 ret = qla25xx_delete_req_que(vha, qpair->req); 8827 if (ret != QLA_SUCCESS) 8828 goto fail; 8829 8830 ret = qla25xx_delete_rsp_que(vha, qpair->rsp); 8831 if (ret != QLA_SUCCESS) 8832 goto fail; 8833 8834 mutex_lock(&ha->mq_lock); 8835 
ha->queue_pair_map[qpair->id] = NULL; 8836 clear_bit(qpair->id, ha->qpair_qid_map); 8837 ha->num_qpairs--; 8838 list_del(&qpair->qp_list_elem); 8839 if (list_empty(&vha->qp_list)) { 8840 vha->flags.qpairs_available = 0; 8841 vha->flags.qpairs_req_created = 0; 8842 vha->flags.qpairs_rsp_created = 0; 8843 } 8844 mempool_destroy(qpair->srb_mempool); 8845 kfree(qpair); 8846 mutex_unlock(&ha->mq_lock); 8847 8848 return QLA_SUCCESS; 8849 fail: 8850 return ret; 8851 } 8852