/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
*  QLogic ISP2x00 Hardware Support Function Prototypes.
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * qla2x00_sp_timeout - expiry handler for an SRB's embedded IOCB timer.
 * @t: the timer embedded in srb->u.iocb_cmd.timer (recovered via from_timer).
 *
 * Runs in timer (softirq) context.  Under the hardware lock it removes the
 * SRB from the base request queue's outstanding-command table and invokes
 * the SRB-type-specific timeout callback installed by the submitter.
 *
 * NOTE(review): only req_q_map[0] is consulted — assumes async IOCBs are
 * always issued on the base request queue; confirm for multi-queue setups.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	req = vha->hw->req_q_map[0];
	/* Detach from the outstanding table so an ISR completion racing with
	 * this timeout cannot find the SRB any more. */
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	/* Type-specific timeout handler (e.g. qla2x00_async_iocb_timeout). */
	iocb->timeout(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/*
 * qla2x00_sp_free - release an SRB and stop its pending IOCB timer.
 * @ptr: the srb_t to free (void * to match the sp->free callback type).
 */
void
qla2x00_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * qla2x00_get_async_timeout - compute the timeout (in seconds) used for
 * asynchronous login/logout/ADISC/mailbox IOCBs.
 *
 * Default is twice the switch-negotiated R_A_TOV (r_a_tov is held in
 * tenths of a second, hence the /10).  ISPFx00 parts use a fixed default;
 * pre-FWI2 ISPs fall back to the login timeout seeded from the
 * initialization control block.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * qla2x00_async_iocb_timeout - common timeout callback for async IOCB SRBs.
 * @data: the timed-out srb_t.
 *
 * Clears the fcport's async-in-flight flags (when an fcport is attached),
 * then completes the SRB with QLA_FUNCTION_TIMEOUT according to its type.
 * For a timed-out login, the logio data is seeded so the completion path
 * can decide whether a retry is appropriate.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		    QLA_LOGIO_LOGIN_RETRIED : 0;
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
		/* These types carry no retry state; just complete as timeout. */
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	}
}

/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI SRB.
 *
 * Clears the fcport async flags and, unless the driver is unloading,
 * forwards the logio status/IOP words to the fcport event handler as an
 * FCME_PLOGI_DONE event.  Always frees the SRB.
 */
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla2x00_async_login - issue an asynchronous PLOGI to @fcport.
 * @vha:    host the login is issued on.
 * @fcport: remote port to log in to.
 * @data:   caller-supplied logio words; data[1] may carry
 *          QLA_LOGIO_LOGIN_RETRIED to mark this attempt as a retry.
 *
 * Marks the fcport FCF_ASYNC_SENT / DSC_LOGIN_PEND and starts an
 * SRB_LOGIN_CMD IOCB; completion is reported through
 * qla2x00_async_login_sp_done().  For NVMe-capable ports the PRLI step
 * is skipped here (handled separately).
 *
 * Returns QLA_SUCCESS if the IOCB was started; on failure the SRB is
 * freed and RELOGIN_NEEDED is scheduled.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generation counters so stale completions can be detected. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/*
 * qla2x00_async_logout_sp_done - completion callback for an async LOGO SRB.
 * Bumps the login generation and notifies the target-mode layer.
 */
static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, res);
	sp->free(sp);
}

/*
 * qla2x00_async_logout - issue an asynchronous LOGO to @fcport.
 *
 * Skipped when the host is offline or another async operation is already
 * in flight (FCF_ASYNC_SENT).  Returns QLA_SUCCESS when the SRB_LOGOUT_CMD
 * IOCB was started; FCF_ASYNC_SENT is cleared on any failure path.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_logout_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
}

/*
 * qla2x00_async_prlo_sp_done - completion callback for an async PRLO SRB.
 * Defers the real handling (qla2x00_async_prlo_done) to the work queue
 * unless the driver is unloading.
 */
static void
qla2x00_async_prlo_sp_done(void *s, int res)
{
	srb_t *sp = (srb_t *)s;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

/*
 * qla2x00_async_prlo - issue an asynchronous PRLO (process logout) to
 * @fcport.  Returns QLA_SUCCESS when the SRB_PRLO_CMD IOCB was started.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_prlo_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_handle_adisc_event - process an FCME_ADISC_DONE event.
 *
 * On ADISC failure the session is scheduled for deletion.  On success,
 * stale completions are filtered by comparing the generation counters
 * snapshotted into the SRB (gen1=rscn_gen, gen2=login_gen) against the
 * fcport's current values; an RSCN-generation change triggers a GIDPN
 * re-query instead.  Otherwise the login is finalized via
 * __qla24xx_handle_gpdb_event().
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

/*
 * qla2x00_async_adisc_sp_done - completion callback for an async ADISC SRB.
 * Packs the logio status/IOP words into an FCME_ADISC_DONE event and hands
 * it to the fcport event handler.
 */
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_ADISC_DONE;
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}

/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to re-validate an
 * existing session with @fcport.
 * @data: data[1] may carry QLA_LOGIO_LOGIN_RETRIED to mark a retry.
 *
 * On any failure path the request is re-posted through the DPC work
 * queue (qla2x00_post_async_adisc_work) as a best-effort retry.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	/* Best-effort retry: requeue the ADISC through the DPC work queue. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

/*
 * qla24xx_handle_gnl_done_event - process an FCME_GNL_DONE event for one
 * fcport after a Get Name List (GNL) mailbox command completed.
 *
 * Filters stale results via the rscn/login generation counters, then scans
 * the firmware name list (vha->gnl.l, ea->data[0] bytes transferred) for an
 * entry matching this fcport's WWPN:
 *  - a mismatch between the firmware's port ID / N_Port handle and the
 *    driver's view schedules the session for deletion;
 *  - otherwise the loop id is adopted, conflicting sessions sharing the
 *    same loop_id/nport id are invalidated, and the next discovery step
 *    (ADISC for PRLI-complete entries, or a fresh login) is posted.
 * If the firmware has no record of the port, stale conflicting sessions
 * are cleaned up and a new login is attempted.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		/* GNL itself failed: re-arm the login retry counter. */
		if (fcport->login_retry == 0) {
			fcport->login_retry = vha->hw->login_retry_count;
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* An RSCN arrived while GNL was in flight; re-query the ID. */
		ql_dbg(ql_dbg_disc, vha, 0x20df,
		    "%s %8phC rscn gen changed rscn %d|%d \n",
		    __func__, fcport->port_name,
		    fcport->last_rscn_gen, fcport->rscn_gen);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	/* Number of name-list entries actually transferred by firmware. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		found = 1;
		/* port_id[] is little-endian: al_pa, area, domain. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		if ((id.b24 != fcport->d_id.b24) ||
		    ((fcport->loop_id != FC_NO_LOOP_ID) &&
		    (fcport->loop_id != loop_id))) {
			/* Firmware disagrees with our port id / loop id:
			 * drop the session and rediscover. */
			ql_dbg(ql_dbg_disc, vha, 0x20e3,
			    "%s %d %8phC post del sess\n",
			    __func__, __LINE__, fcport->port_name);
			qlt_schedule_sess_for_deletion(fcport);
			return;
		}

		fcport->loop_id = loop_id;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
532 */ 533 conflict_fcport->conflict = fcport; 534 fcport->login_pause = 1; 535 } 536 537 if (fcport->fc4f_nvme) 538 current_login_state = e->current_login_state >> 4; 539 else 540 current_login_state = e->current_login_state & 0xf; 541 542 switch (current_login_state) { 543 case DSC_LS_PRLI_COMP: 544 ql_dbg(ql_dbg_disc, vha, 0x20e4, 545 "%s %d %8phC post gpdb\n", 546 __func__, __LINE__, fcport->port_name); 547 548 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 549 fcport->port_type = FCT_INITIATOR; 550 else 551 fcport->port_type = FCT_TARGET; 552 553 data[0] = data[1] = 0; 554 qla2x00_post_async_adisc_work(vha, fcport, data); 555 break; 556 case DSC_LS_PORT_UNAVAIL: 557 default: 558 if (fcport->loop_id == FC_NO_LOOP_ID) { 559 qla2x00_find_new_loop_id(vha, fcport); 560 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 561 } 562 ql_dbg(ql_dbg_disc, vha, 0x20e5, 563 "%s %d %8phC\n", 564 __func__, __LINE__, fcport->port_name); 565 qla24xx_fcport_handle_login(vha, fcport); 566 break; 567 } 568 } 569 570 if (!found) { 571 /* fw has no record of this port */ 572 for (i = 0; i < n; i++) { 573 e = &vha->gnl.l[i]; 574 id.b.domain = e->port_id[0]; 575 id.b.area = e->port_id[1]; 576 id.b.al_pa = e->port_id[2]; 577 id.b.rsvd_1 = 0; 578 loop_id = le16_to_cpu(e->nport_handle); 579 580 if (fcport->d_id.b24 == id.b24) { 581 conflict_fcport = 582 qla2x00_find_fcport_by_wwpn(vha, 583 e->port_name, 0); 584 ql_dbg(ql_dbg_disc, vha, 0x20e6, 585 "%s %d %8phC post del sess\n", 586 __func__, __LINE__, 587 conflict_fcport->port_name); 588 qlt_schedule_sess_for_deletion 589 (conflict_fcport); 590 } 591 592 /* FW already picked this loop id for another fcport */ 593 if (fcport->loop_id == loop_id) 594 fcport->loop_id = FC_NO_LOOP_ID; 595 } 596 qla24xx_fcport_handle_login(vha, fcport); 597 } 598 } /* gnl_event */ 599 600 static void 601 qla24xx_async_gnl_sp_done(void *s, int res) 602 { 603 struct srb *sp = s; 604 struct scsi_qla_host *vha = sp->vha; 605 unsigned long flags; 606 struct fc_port 
	    *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	/* mb[1] holds the byte count transferred by firmware. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		/* Record every handle the firmware is using. */
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;

	/* Move all waiters onto a private list so the handler can run
	 * without further list manipulation races. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

/*
 * qla24xx_async_gnl - issue a Get Name List (MBC_PORT_NODE_NAME_LIST)
 * mailbox command.
 *
 * Multiple fcports can wait for one GNL: each caller is queued on
 * vha->gnl.fcports under tgt.sess_lock, and only the first caller
 * (vha->gnl.sent transitions 0 -> 1) actually starts the mailbox IOCB.
 * Results are delivered to every waiter by qla24xx_async_gnl_sp_done().
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->disc_state = DSC_GNL;
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; piggyback on its completion. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* Mailbox registers: DMA address/size of the name-list buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9]
	    = vha->vp_idx;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/*
 * qla24xx_post_gnl_work - queue a GNL request onto the DPC work queue.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb_sp_done - completion callback for the Get Port
 * Database mailbox SRB.  Dispatches FCME_GPDB_DONE and returns the
 * port-database DMA buffer to the pool.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/*
 * qla24xx_post_prli_work - queue an async PRLI onto the DPC work queue.
 */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_prli_sp_done - completion callback for an async PRLI SRB.
 * Forwards the logio status/IOP words as an FCME_PRLI_DONE event unless
 * the driver is unloading.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla24xx_async_prli - issue an asynchronous PRLI to @fcport.
 *
 * Skipped while a firmware-side PLOGI/PRLI is already pending or PLOGI
 * just completed (fw_login_state checks).  NVMe-capable ports get the
 * SRB_LOGIN_NVME_PRLI flavor.  On start failure the SRB is freed and a
 * relogin is scheduled.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla24xx_post_gpdb_work - queue a Get Port Database request onto the DPC
 * work queue.
 * @opt: option bits passed through to mailbox register 10.
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb - issue an asynchronous MBC_GET_PORT_DATABASE.
 *
 * Allocates a port_database_24xx DMA buffer from the s_dma_pool (freed by
 * the completion callback on success, or here on failure) and starts an
 * SRB_MB_IOCB.  On any failure path the request is re-posted through the
 * DPC work queue as a retry.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* Mailbox registers: handle + DMA address of the reply buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	/* Completion callback frees this buffer via in/in_dma. */
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/* Best-effort retry via the DPC work queue. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

/*
 * __qla24xx_handle_gpdb_event - finalize a successful login for
 * ea->fcport under tgt.sess_lock.
 *
 * Bumps the login generation and, for a first-time login of a
 * non-reserved address, increments the host's fcport count and posts the
 * next discovery step: upd_fcport when iIDMA/GPSC is unavailable,
 * otherwise GFPNID (if the port id changed) or GPSC.  A port that was
 * already logged in is treated as a successful revalidation.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		if (!IS_IIDMA_CAPABLE(vha->hw) ||
		    !vha->hw->flags.gpsc_supported) {
			ql_dbg(ql_dbg_disc, vha, 0x20d6,
			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_upd_fcport_work(vha, ea->fcport);
		} else {
			if (ea->fcport->id_changed) {
				ea->fcport->id_changed = 0;
				ql_dbg(ql_dbg_disc, vha, 0x20d7,
				    "%s %d %8phC post gfpnid fcp_cnt %d\n",
				    __func__, __LINE__, ea->fcport->port_name,
				    vha->fcport_count);
				qla24xx_post_gfpnid_work(vha, ea->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20d7,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__, ea->fcport->port_name,
				    vha->fcport_count);
				qla24xx_post_gpsc_work(vha, ea->fcport);
			}
		}
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

/*
 * qla24xx_handle_gpdb_event - process an FCME_GPDB_DONE event.
 *
 * Examines the firmware's current login state from the returned port
 * database: PRLI-complete entries are parsed and finalized; in-progress
 * states schedule a relogin; LOGO/unavailable states schedule session
 * deletion.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	switch (pd->current_login_state) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
		    __func__, __LINE__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

/*
 * qla_chk_n2n_b4_login - decide whether this host should initiate the
 * login, then start it.
 *
 * In pure target mode nothing is initiated.  In dual mode on an N2N
 * (point-to-point) topology, the side with the higher WWPN initiates;
 * the lower side only logs in once the remote PLOGI has completed and
 * its NACK grace deadline has passed.  In all other initiator cases the
 * login proceeds.  A loop id is allocated first when the port has none;
 * running out of loop ids deletes the session.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
				    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login) {
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

/*
 * qla24xx_fcport_handle_login - drive the next step of the login state
 * machine for @fcport.
 *
 * Bails out early (returning 0 in every case) when: retries are
 * exhausted, the port was not found by the last scan, a firmware-side
 * PLOGI/PRLI is pending, the PLOGI NACK grace period has not elapsed,
 * the host is in pure target mode, or another async op is in flight.
 * Otherwise one retry is consumed and the next action is chosen from
 * disc_state: GNNID/GNL/login for a deleted session, login (possibly
 * paused) after GNL, GIDPN or N2N login after a failed login, and ADISC
 * revalidation after a completed login.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->login_retry,
	    fcport->loop_id, fcport->scan_state);

	if (fcport->login_retry == 0)
		return 0;

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	fcport->login_retry--;

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		if (wwn == 0) {
			/* Node name unknown: query it first. */
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post GNNID\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnnid_work(vha, fcport);
		} else if (fcport->loop_id == FC_NO_LOOP_ID) {
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnl_work(vha, fcport);
		} else {
			qla_chk_n2n_b4_login(vha, fcport);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			/* A conflicting session must clean up first. */
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		qla_chk_n2n_b4_login(vha, fcport);
		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post adisc\n",
		    __func__, __LINE__, fcport->port_name);
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	default:
		break;
	}

	return 0;
}

static
void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
	fcport->rscn_gen++;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
	    "%s %8phC DS %d LS %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state);

	/* An async op is already outstanding for this port; let it finish. */
	if (fcport->flags & FCF_ASYNC_SENT)
		return;

	switch (fcport->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_COMPLETE:
		qla24xx_post_gpnid_work(fcport->vha, &ea->id);
		break;
	default:
		break;
	}
}

/*
 * Queue a "new session" work item for the DPC thread.  @pla (pending
 * login association) and @node_name may be NULL.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED if no work element could be allocated.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/*
 * Handle a relogin request for ea->fcport.  Bails out (optionally
 * re-arming RELOGIN_NEEDED) while firmware login, deletion, or a prior
 * async op is still in flight; requeries the fabric if an RSCN arrived
 * since the last attempt; otherwise re-enters the login state machine.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	if (fcport->flags & FCF_ASYNC_SENT) {
		/* Restore the retry credit; this attempt never started. */
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* Fabric changed since we last looked; requery the port ID. */
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

/*
 * Central dispatcher for fcport discovery events (FCME_*).  Discovery
 * query completions are dropped while a loop resync is pending/active;
 * everything else fans out to the per-event handlers.  An unknown event
 * is a driver bug and triggers BUG_ON.
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *f, *tf;
	uint32_t id = 0, mask, rid;
	unsigned long flags;

	switch (ea->event) {
	case FCME_RSCN:
	case FCME_GIDPN_DONE:
	case FCME_GPSC_DONE:
	case FCME_GPNID_DONE:
	case FCME_GNNID_DONE:
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
			return;
		break;
	default:
		break;
	}

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			/* Single-port RSCN: schedule a fabric rescan once. */
			spin_lock_irqsave(&vha->work_lock, flags);
			if (vha->scan.scan_flags == 0) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s: schedule\n", __func__);
				vha->scan.scan_flags |= SF_QUEUED;
				schedule_delayed_work(&vha->scan.scan_work, 5);
			}
			spin_unlock_irqrestore(&vha->work_lock, flags);

			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			/* Fan the RSCN out to every port in the region. */
			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			/* Fabric-wide change: force a full resync. */
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_handle_gpsc_event(vha, ea);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_ADISC_DONE:
		qla24xx_handle_adisc_event(vha, ea);
		break;
	case FCME_GNNID_DONE:
		qla24xx_handle_gnnid_event(vha, ea);
		break;
	case FCME_GFPNID_DONE:
		qla24xx_handle_gfpnid_event(vha, ea);
		break;
	default:
		BUG_ON(1);
		break;
	}
}

/* Timeout callback for an async TM srb: flag CS_TIMEOUT and wake waiter. */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = CS_TIMEOUT;
	complete(&tmf->u.tmf.comp);
}

/* Completion callback for an async TM srb: wake the synchronous waiter. */
static void
qla2x00_tmf_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

/*
 * Issue a task-management command (e.g. LUN reset) to @fcport and wait
 * synchronously for its completion.  Returns QLA_SUCCESS on CS_COMPLETE,
 * QLA_FUNCTION_FAILED otherwise.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport,
GFP_KERNEL); 1464 if (!sp) 1465 goto done; 1466 1467 tm_iocb = &sp->u.iocb_cmd; 1468 sp->type = SRB_TM_CMD; 1469 sp->name = "tmf"; 1470 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); 1471 tm_iocb->u.tmf.flags = flags; 1472 tm_iocb->u.tmf.lun = lun; 1473 tm_iocb->u.tmf.data = tag; 1474 sp->done = qla2x00_tmf_sp_done; 1475 tm_iocb->timeout = qla2x00_tmf_iocb_timeout; 1476 init_completion(&tm_iocb->u.tmf.comp); 1477 1478 rval = qla2x00_start_sp(sp); 1479 if (rval != QLA_SUCCESS) 1480 goto done_free_sp; 1481 1482 ql_dbg(ql_dbg_taskm, vha, 0x802f, 1483 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 1484 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 1485 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1486 1487 wait_for_completion(&tm_iocb->u.tmf.comp); 1488 1489 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 1490 QLA_SUCCESS : QLA_FUNCTION_FAILED; 1491 1492 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { 1493 ql_dbg(ql_dbg_taskm, vha, 0x8030, 1494 "TM IOCB failed (%x).\n", rval); 1495 } 1496 1497 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { 1498 flags = tm_iocb->u.tmf.flags; 1499 lun = (uint16_t)tm_iocb->u.tmf.lun; 1500 1501 /* Issue Marker IOCB */ 1502 qla2x00_marker(vha, vha->hw->req_q_map[0], 1503 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, 1504 flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); 1505 } 1506 1507 done_free_sp: 1508 sp->free(sp); 1509 sp->fcport->flags &= ~FCF_ASYNC_SENT; 1510 done: 1511 return rval; 1512 } 1513 1514 static void 1515 qla24xx_abort_iocb_timeout(void *data) 1516 { 1517 srb_t *sp = data; 1518 struct srb_iocb *abt = &sp->u.iocb_cmd; 1519 1520 abt->u.abt.comp_status = CS_TIMEOUT; 1521 complete(&abt->u.abt.comp); 1522 } 1523 1524 static void 1525 qla24xx_abort_sp_done(void *ptr, int res) 1526 { 1527 srb_t *sp = ptr; 1528 struct srb_iocb *abt = &sp->u.iocb_cmd; 1529 1530 complete(&abt->u.abt.comp); 1531 } 1532 1533 int 1534 qla24xx_async_abort_cmd(srb_t *cmd_sp) 1535 { 1536 scsi_qla_host_t *vha = cmd_sp->vha; 1537 fc_port_t *fcport = cmd_sp->fcport; 1538 struct srb_iocb *abt_iocb; 1539 srb_t *sp; 1540 int rval = QLA_FUNCTION_FAILED; 1541 1542 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1543 if (!sp) 1544 goto done; 1545 1546 abt_iocb = &sp->u.iocb_cmd; 1547 sp->type = SRB_ABT_CMD; 1548 sp->name = "abort"; 1549 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); 1550 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; 1551 1552 if (vha->flags.qpairs_available && cmd_sp->qpair) 1553 abt_iocb->u.abt.req_que_no = 1554 cpu_to_le16(cmd_sp->qpair->req->id); 1555 else 1556 abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id); 1557 1558 sp->done = qla24xx_abort_sp_done; 1559 abt_iocb->timeout = qla24xx_abort_iocb_timeout; 1560 init_completion(&abt_iocb->u.abt.comp); 1561 1562 rval = qla2x00_start_sp(sp); 1563 if (rval != QLA_SUCCESS) 1564 goto done_free_sp; 1565 1566 ql_dbg(ql_dbg_async, vha, 0x507c, 1567 "Abort command issued - hdl=%x, target_id=%x\n", 1568 cmd_sp->handle, fcport->tgt_id); 1569 1570 wait_for_completion(&abt_iocb->u.abt.comp); 1571 1572 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
1573 QLA_SUCCESS : QLA_FUNCTION_FAILED; 1574 1575 done_free_sp: 1576 sp->free(sp); 1577 done: 1578 return rval; 1579 } 1580 1581 int 1582 qla24xx_async_abort_command(srb_t *sp) 1583 { 1584 unsigned long flags = 0; 1585 1586 uint32_t handle; 1587 fc_port_t *fcport = sp->fcport; 1588 struct scsi_qla_host *vha = fcport->vha; 1589 struct qla_hw_data *ha = vha->hw; 1590 struct req_que *req = vha->req; 1591 1592 if (vha->flags.qpairs_available && sp->qpair) 1593 req = sp->qpair->req; 1594 1595 spin_lock_irqsave(&ha->hardware_lock, flags); 1596 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1597 if (req->outstanding_cmds[handle] == sp) 1598 break; 1599 } 1600 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1601 if (handle == req->num_outstanding_cmds) { 1602 /* Command not found. */ 1603 return QLA_FUNCTION_FAILED; 1604 } 1605 if (sp->type == SRB_FXIOCB_DCMD) 1606 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, 1607 FXDISC_ABORT_IOCTL); 1608 1609 return qla24xx_async_abort_cmd(sp); 1610 } 1611 1612 static void 1613 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 1614 { 1615 switch (ea->data[0]) { 1616 case MBS_COMMAND_COMPLETE: 1617 ql_dbg(ql_dbg_disc, vha, 0x2118, 1618 "%s %d %8phC post gpdb\n", 1619 __func__, __LINE__, ea->fcport->port_name); 1620 1621 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 1622 ea->fcport->logout_on_delete = 1; 1623 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 1624 break; 1625 default: 1626 if (ea->fcport->n2n_flag) { 1627 ql_dbg(ql_dbg_disc, vha, 0x2118, 1628 "%s %d %8phC post fc4 prli\n", 1629 __func__, __LINE__, ea->fcport->port_name); 1630 ea->fcport->fc4f_nvme = 0; 1631 ea->fcport->n2n_flag = 0; 1632 qla24xx_post_prli_work(vha, ea->fcport); 1633 } 1634 ql_dbg(ql_dbg_disc, vha, 0x2119, 1635 "%s %d %8phC unhandle event of %x\n", 1636 __func__, __LINE__, ea->fcport->port_name, ea->data[0]); 1637 break; 1638 } 1639 } 1640 1641 static void 1642 
/*
 * Handle PLOGI completion for ea->fcport.  On success, proceed to PRLI
 * (NVMe) or GPDB (FCP); on MBS_LOOP_ID_USED / MBS_PORT_ID_USED resolve
 * the loop-id/nport-id collision before retrying; on error, mark the
 * login failed and schedule a relogin or mark the device lost.
 */
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Generation counters detect state changes that raced this PLOGI. */
	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		if (IS_SW_RESV_ADDR(cid)) {
			/* Reserved address: keep the loop id marked busy. */
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
			qla2x00_clear_loop_id(ea->fcport);
			qla24xx_post_gidpn_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/*
 * LOGO completion: mark the device lost, finish any target-mode LOGO
 * bookkeeping, and bump the login generation so racing logins restart.
 */
void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
	fcport->login_gen++;
	return;
}

/*
 * ADISC completion: on success refresh the fcport; otherwise fall
 * through to the retry/lost handling below.
 */
void
qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	if (data[0] == MBS_COMMAND_COMPLETE) {
		qla2x00_update_fcport(vha, fcport);

		return;
	}

	/* Retry login.
	 */
	fcport->flags &= ~FCF_ASYNC_SENT;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	else
		qla2x00_mark_device_lost(vha, fcport, 1, 0);

	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * ISP83xx NIC-core firmware load, performed under the inter-driver
 * coordination (IDC) lock: announce driver presence, negotiate IDC
 * major/minor versions, and run the IDC state handler.  Returns
 * QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	/* Two bits per function in the minor-version register. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
* qla2x00_initialize_adapter
*      Initialize board.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags.
	 */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Load and start firmware only if it is not already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Rings are only needed when this function can initiate I/O. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting in PCI command register. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t	w;
	unsigned long   flags = 0;
	uint32_t	cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* Real 2300 (fb rev 6): MWI must stay disabled. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048).
	 */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	/* NOTE(review): comment says 2048 but the call requests 4096 -
	 * confirm which value is intended. */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int  rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long   flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers.
 */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting.
 */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state (up to 3s). */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
 * @vha: HA context
 *
 * No-op (returns QLA_SUCCESS) on non-ISP81xx adapters.
 *
 * Returns 0 on success.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_TIMEOUT when the RISC does not
 * quiesce within the polling windows below.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC.
 */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	/* Record (for the fw dump header) that DMA shut down cleanly. */
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete.
 */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for mailbox 0 to clear, signaling RISC ready. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}

/* Read the RISC semaphore register through the indirect iobase window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);

}

/* Write the RISC semaphore register through the indirect iobase window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}

/*
 * Acquire (or force) the RISC semaphore before reset.  Only applies to
 * boards with PCI subsystem IDs 0x0175/0x0240 -- NOTE(review): presumably
 * specific OEM variants; confirm against the PCI ID tables.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Semaphore held by force bit -- clear it and wait for release. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Soft-resets the ISP, verifies the chip's product ID and exercises the
 * mailbox registers.
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int		rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long	flags = 0;
	uint16_t	data;
	uint32_t	cnt;
	uint16_t	mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b,
	    "Testing device at %lx.\n", (u_long)&reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

/*
 * qla2x00_alloc_offload_mem() - Allocate FCE and EFT trace buffers.
 * @vha: HA context
 *
 * Best-effort: allocation or enable failures are logged and the
 * corresponding trace feature is left disabled.  No-op when the EFT
 * buffer already exists.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: Offload Mem is already allocated.\n",
		    __func__);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		/* Allocate memory for Fibre Channel Event Buffer.
 */
		/* FCE exists only on ISP25xx/81xx/83xx/27xx. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/* Allocate memory for Fibre Channel Event Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer.
 */
		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto eft_err;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto eft_err;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}

eft_err:
	return;
}

/*
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware-dump buffer.
 * @vha: HA context
 *
 * Computes the per-ISP dump layout (fixed register area, external memory,
 * request/response queues, EFT/FCE/MQ chains), (re)allocates ha->fw_dump
 * with vmalloc when the required size differs from the current one, and
 * seeds the dump header for pre-ISP27xx adapters.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += ha->max_req_queues *
			    (req->length * sizeof(request_t));
			mq_size += ha->max_rsp_queues *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
try_eft:
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
		eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha)) {
		/* ISP27xx dump size comes from the firmware dump template. */
		if (!ha->fw_dump_template) {
			ql_log(ql_log_warn, vha, 0x00ba,
			    "Failed missing fwdump template\n");
			return;
		}
		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
		ql_dbg(ql_dbg_init, vha, 0x00fa,
		    "-> allocating fwdump (%x bytes)...\n", dump_size);
		goto allocate;
	}

	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);
	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	if (ha->exchoffld_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exchoffld_size;
	if (ha->exlogin_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exlogin_size;

allocate:
	if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			/* Replace any previously allocated dump buffer. */
			if (ha->fw_dump)
				vfree(ha->fw_dump);
			ha->fw_dump = fw_dump;

			ha->fw_dump_len = dump_size;
			ql_dbg(ql_dbg_init, vha, 0x00c5,
			    "Allocated (%d KB) for firmware dump.\n",
			    dump_size / 1024);

			if (IS_QLA27XX(ha))
				return;

			ha->fw_dump->signature[0] = 'Q';
			ha->fw_dump->signature[1] = 'L';
			ha->fw_dump->signature[2] = 'G';
			ha->fw_dump->signature[3] = 'C';
			ha->fw_dump->version = htonl(1);

			ha->fw_dump->fixed_size = htonl(fixed_size);
			ha->fw_dump->mem_size = htonl(mem_size);
			ha->fw_dump->req_q_size = htonl(req_q_size);
			ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

			ha->fw_dump->eft_size = htonl(eft_size);
			ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
			ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

			ha->fw_dump->header_size =
				htonl(offsetof(struct qla2xxx_fw_dump, isp));
		}
	}
}

/*
 * qla81xx_mpi_sync() - Synchronize the MPS field of RISC RAM word 0x7a15
 * with the value read from PCI config space offset 0x54, under the
 * semaphore at RISC RAM 0x7c00.  ISP81xx only; no-op otherwise.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire semaphore. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
3062 } 3063 3064 int 3065 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) 3066 { 3067 /* Don't try to reallocate the array */ 3068 if (req->outstanding_cmds) 3069 return QLA_SUCCESS; 3070 3071 if (!IS_FWI2_CAPABLE(ha)) 3072 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; 3073 else { 3074 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 3075 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 3076 else 3077 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 3078 } 3079 3080 req->outstanding_cmds = kzalloc(sizeof(srb_t *) * 3081 req->num_outstanding_cmds, GFP_KERNEL); 3082 3083 if (!req->outstanding_cmds) { 3084 /* 3085 * Try to allocate a minimal size just so we can get through 3086 * initialization. 3087 */ 3088 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 3089 req->outstanding_cmds = kzalloc(sizeof(srb_t *) * 3090 req->num_outstanding_cmds, GFP_KERNEL); 3091 3092 if (!req->outstanding_cmds) { 3093 ql_log(ql_log_fatal, NULL, 0x0126, 3094 "Failed to allocate memory for " 3095 "outstanding_cmds for req_que %p.\n", req); 3096 req->num_outstanding_cmds = 0; 3097 return QLA_FUNCTION_FAILED; 3098 } 3099 } 3100 3101 return QLA_SUCCESS; 3102 } 3103 3104 #define PRINT_FIELD(_field, _flag, _str) { \ 3105 if (a0->_field & _flag) {\ 3106 if (p) {\ 3107 strcat(ptr, "|");\ 3108 ptr++;\ 3109 leftover--;\ 3110 } \ 3111 len = snprintf(ptr, leftover, "%s", _str); \ 3112 p = 1;\ 3113 leftover -= len;\ 3114 ptr += len; \ 3115 } \ 3116 } 3117 3118 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 3119 { 3120 #define STR_LEN 64 3121 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 3122 u8 str[STR_LEN], *ptr, p; 3123 int leftover, len; 3124 3125 memset(str, 0, STR_LEN); 3126 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 3127 ql_dbg(ql_dbg_init, vha, 0x015a, 3128 "SFP MFG Name: %s\n", str); 3129 3130 memset(str, 0, STR_LEN); 3131 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 3132 ql_dbg(ql_dbg_init, 
vha, 0x015c, 3133 "SFP Part Name: %s\n", str); 3134 3135 /* media */ 3136 memset(str, 0, STR_LEN); 3137 ptr = str; 3138 leftover = STR_LEN; 3139 p = len = 0; 3140 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 3141 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 3142 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 3143 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 3144 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 3145 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 3146 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 3147 ql_dbg(ql_dbg_init, vha, 0x0160, 3148 "SFP Media: %s\n", str); 3149 3150 /* link length */ 3151 memset(str, 0, STR_LEN); 3152 ptr = str; 3153 leftover = STR_LEN; 3154 p = len = 0; 3155 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 3156 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 3157 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 3158 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 3159 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 3160 ql_dbg(ql_dbg_init, vha, 0x0196, 3161 "SFP Link Length: %s\n", str); 3162 3163 memset(str, 0, STR_LEN); 3164 ptr = str; 3165 leftover = STR_LEN; 3166 p = len = 0; 3167 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 3168 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 3169 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 3170 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 3171 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 3172 ql_dbg(ql_dbg_init, vha, 0x016e, 3173 "SFP FC Link Tech: %s\n", str); 3174 3175 if (a0->length_km) 3176 ql_dbg(ql_dbg_init, vha, 0x016f, 3177 "SFP Distant: %d km\n", a0->length_km); 3178 if (a0->length_100m) 3179 ql_dbg(ql_dbg_init, vha, 0x0170, 3180 "SFP Distant: %d m\n", a0->length_100m*100); 3181 if (a0->length_50um_10m) 3182 ql_dbg(ql_dbg_init, vha, 0x0189, 3183 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 3184 if (a0->length_62um_10m) 3185 ql_dbg(ql_dbg_init, vha, 0x018a, 3186 "SFP Distant (WL=62.5um): %d m\n", 
a0->length_62um_10m * 10); 3187 if (a0->length_om4_10m) 3188 ql_dbg(ql_dbg_init, vha, 0x0194, 3189 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); 3190 if (a0->length_om3_10m) 3191 ql_dbg(ql_dbg_init, vha, 0x0195, 3192 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); 3193 } 3194 3195 3196 /* 3197 * Return Code: 3198 * QLA_SUCCESS: no action 3199 * QLA_INTERFACE_ERROR: SFP is not there. 3200 * QLA_FUNCTION_FAILED: detected New SFP 3201 */ 3202 int 3203 qla24xx_detect_sfp(scsi_qla_host_t *vha) 3204 { 3205 int rc = QLA_SUCCESS; 3206 struct sff_8247_a0 *a; 3207 struct qla_hw_data *ha = vha->hw; 3208 3209 if (!AUTO_DETECT_SFP_SUPPORT(vha)) 3210 goto out; 3211 3212 rc = qla2x00_read_sfp_dev(vha, NULL, 0); 3213 if (rc) 3214 goto out; 3215 3216 a = (struct sff_8247_a0 *)vha->hw->sfp_data; 3217 qla2xxx_print_sfp_info(vha); 3218 3219 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) { 3220 /* long range */ 3221 ha->flags.detected_lr_sfp = 1; 3222 3223 if (a->length_km > 5 || a->length_100m > 50) 3224 ha->long_range_distance = LR_DISTANCE_10K; 3225 else 3226 ha->long_range_distance = LR_DISTANCE_5K; 3227 3228 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting) 3229 ql_dbg(ql_dbg_async, vha, 0x507b, 3230 "Detected Long Range SFP.\n"); 3231 } else { 3232 /* short range */ 3233 ha->flags.detected_lr_sfp = 0; 3234 if (ha->flags.using_lr_setting) 3235 ql_dbg(ql_dbg_async, vha, 0x5084, 3236 "Detected Short Range SFP.\n"); 3237 } 3238 3239 if (!vha->flags.init_done) 3240 rc = QLA_SUCCESS; 3241 out: 3242 return rc; 3243 } 3244 3245 /** 3246 * qla2x00_setup_chip() - Load and start RISC firmware. 3247 * @ha: HA context 3248 * 3249 * Returns 0 on success. 
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information.
 */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

/* P3P (82xx) adapters jump in here after stopping their firmware above. */
enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity.
 */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		/* Flash Access Control support; cache the sector size. */
		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue to initialize
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index    = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Mark every entry as already consumed. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/2200 have no serial-link/LED options to tune. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* BIT_2 in seriallink_options[3] enables custom swing/emphasis. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* fw_options[10] carries the 1G serial-link control word. */
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* fw_options[11] carries the 2G serial-link control word. */
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * Firmware-options setup for FWI2-capable (ISP24xx and later) adapters,
 * including ATIO-queue routing and exchange-tracking knobs.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	/* P3P (82xx) parts manage these options elsewhere. */
	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only push the options down if any of them is actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

/*
 * Program request/response ring addresses and lengths into the init
 * control block and zero the ring in/out registers (ISP2xxx variant).
 * Caller holds hardware_lock (called from qla2x00_init_rings()).
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

/*
 * FWI2 (ISP24xx+) variant of ring configuration: also programs the ATIO
 * queue for target mode, shadow registers, MSI-X vector registration and
 * the multi-queue option bits.  Caller holds hardware_lock.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	/* BIT_30|BIT_29 enable shadow registers for in/out pointers. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* Let the target-mode code program its ATIO ring registers too. */
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: adapter state pointer
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* out_ptr lives one slot past the ring — TODO confirm this
		 * is the shadow-register location. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is never used; handles start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ?
		    "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: adapter state pointer
 *
 * Polls the firmware state until it reports READY, the loop-down or
 * overall wait deadline expires, or a mailbox command fails.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/* loop_id 0x1b on the base port: retry via explicit
			 * link initialization before escalating to an ISP
			 * abort. */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo == 4: topology not yet resolved — ask caller to retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

/*
 * Derive the model number/description strings from NVRAM data, the
 * subsystem-ID lookup table (older ISPs only), or the supplied default.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
    char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces/NULs from the model string. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	/* Prefer the VPD product-description field when available. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	/* Valid NVRAM sums to zero over the whole image (8-bit). */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM "
		    "detected: checksum=0x%x id=%c version=0x%x.\n",
		    chksum, nv->id[0], nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder (invalid) WWPN bytes — see warning above. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		/* On sparc, real WWNs may be available from OpenFirmware. */
		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = 2048;
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ?
	    1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
4418 icb->interrupt_delay_timer: 2; 4419 } 4420 icb->add_firmware_options[0] &= 4421 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 4422 vha->flags.process_response_queue = 0; 4423 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4424 ha->zio_mode = QLA_ZIO_MODE_6; 4425 4426 ql_log(ql_log_info, vha, 0x0068, 4427 "ZIO mode %d enabled; timer delay (%d us).\n", 4428 ha->zio_mode, ha->zio_timer * 100); 4429 4430 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 4431 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 4432 vha->flags.process_response_queue = 1; 4433 } 4434 } 4435 4436 if (rval) { 4437 ql_log(ql_log_warn, vha, 0x0069, 4438 "NVRAM configuration failed.\n"); 4439 } 4440 return (rval); 4441 } 4442 4443 static void 4444 qla2x00_rport_del(void *data) 4445 { 4446 fc_port_t *fcport = data; 4447 struct fc_rport *rport; 4448 unsigned long flags; 4449 4450 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 4451 rport = fcport->drport ? fcport->drport: fcport->rport; 4452 fcport->drport = NULL; 4453 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 4454 if (rport) { 4455 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, 4456 "%s %8phN. rport %p roles %x\n", 4457 __func__, fcport->port_name, rport, 4458 rport->roles); 4459 4460 fc_remote_port_delete(rport); 4461 } 4462 } 4463 4464 /** 4465 * qla2x00_alloc_fcport() - Allocate a generic fcport. 4466 * @ha: HA context 4467 * @flags: allocation flags 4468 * 4469 * Returns a pointer to the allocated fcport, or NULL, if none available. 4470 */ 4471 fc_port_t * 4472 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 4473 { 4474 fc_port_t *fcport; 4475 4476 fcport = kzalloc(sizeof(fc_port_t), flags); 4477 if (!fcport) 4478 return NULL; 4479 4480 /* Setup fcport template structure. 
*/ 4481 fcport->vha = vha; 4482 fcport->port_type = FCT_UNKNOWN; 4483 fcport->loop_id = FC_NO_LOOP_ID; 4484 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 4485 fcport->supported_classes = FC_COS_UNSPECIFIED; 4486 4487 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 4488 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 4489 flags); 4490 fcport->disc_state = DSC_DELETED; 4491 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 4492 fcport->deleted = QLA_SESS_DELETED; 4493 fcport->login_retry = vha->hw->login_retry_count; 4494 fcport->login_retry = 5; 4495 fcport->logout_on_delete = 1; 4496 4497 if (!fcport->ct_desc.ct_sns) { 4498 ql_log(ql_log_warn, vha, 0xd049, 4499 "Failed to allocate ct_sns request.\n"); 4500 kfree(fcport); 4501 fcport = NULL; 4502 } 4503 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 4504 INIT_LIST_HEAD(&fcport->gnl_entry); 4505 INIT_LIST_HEAD(&fcport->list); 4506 4507 return fcport; 4508 } 4509 4510 void 4511 qla2x00_free_fcport(fc_port_t *fcport) 4512 { 4513 if (fcport->ct_desc.ct_sns) { 4514 dma_free_coherent(&fcport->vha->hw->pdev->dev, 4515 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 4516 fcport->ct_desc.ct_sns_dma); 4517 4518 fcport->ct_desc.ct_sns = NULL; 4519 } 4520 kfree(fcport); 4521 } 4522 4523 /* 4524 * qla2x00_configure_loop 4525 * Updates Fibre Channel Device Database with what is actually on loop. 4526 * 4527 * Input: 4528 * ha = adapter block pointer. 4529 * 4530 * Returns: 4531 * 0 = success. 4532 * 1 = error. 4533 * 2 = database was full and device was not configured. 
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Snapshot the pending dpc work bits; the live bits are cleared just
	 * below and the snapshot ("flags") drives the decisions that follow.
	 * "save_flags" is kept to restore the bits if a resync interrupts us. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do based on topology */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Pure fabric: only a fabric (RSCN) scan is needed. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		if (ha->flags.rida_fmt2) {
			/* With Rida Format 2, the login is already triggered.
			 * We know who is on the other side of the wire.
			 * No need to log in again to find out, or to drop into
			 * qla2x00_configure_local_loop().
			 */
			clear_bit(LOCAL_LOOP_UPDATE, &flags);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		} else {
			if (qla_tgt_mode_enabled(vha)) {
				/* allow the other side to start the login */
				clear_bit(LOCAL_LOOP_UPDATE, &flags);
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
		}
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown state: rescan everything. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 * Note: "flags" is reused here as the spinlock IRQ
			 * state; the dpc snapshot is no longer needed at this
			 * point (only save_flags is consulted below).
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
					spin_lock_irqsave(&ha->tgt.atio_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 0);
					spin_unlock_irqrestore(
					    &ha->tgt.atio_lock, flags);
				} else {
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 1);
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
				}
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/*
 * N2N Login
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 */
static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
				    fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	int	res = QLA_SUCCESS, rval;
	int	greater_wwpn = 0;
	int	logged_in = 0;

	/* Only applicable in point-to-point (N2N) topology. */
	if (ha->current_topology != ISP_CFG_N)
		return res;

	/* The side with the greater WWPN initiates the PLOGI. */
	if (wwn_to_u64(vha->port_name) >
	    wwn_to_u64(vha->n2n_port_name)) {
		ql_dbg(ql_dbg_disc, vha, 0x2002,
		    "HBA WWPN is greater %llx > target %llx\n",
		    wwn_to_u64(vha->port_name),
		    wwn_to_u64(vha->n2n_port_name));
		greater_wwpn = 1;
		fcport->d_id.b24 = vha->n2n_id;
	}

	fcport->loop_id = vha->loop_id;
	fcport->fc4f_nvme = 0;
	fcport->query = 1;

	ql_dbg(ql_dbg_disc, vha, 0x4001,
	    "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
	    fcport->d_id.b24, vha->loop_id);

	/* Fill in member data. */
	if (!greater_wwpn) {
		/* Remote side initiates; query firmware for the remote
		 * login state. 0x4 = PLOGI complete, 0x6 = PRLI complete. */
		rval = qla2x00_get_port_database(vha, fcport, 0);
		ql_dbg(ql_dbg_disc, vha, 0x1051,
		    "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
		    fcport->current_login_state, fcport->last_login_state,
		    fcport->d_id.b24, fcport->loop_id, rval);

		if (((fcport->current_login_state & 0xf) == 0x4) ||
		    ((fcport->current_login_state & 0xf) == 0x6))
			logged_in = 1;
	}

	if (logged_in || greater_wwpn) {
		if (!vha->nvme_local_port && vha->flags.nvme_enabled)
			qla_nvme_register_hba(vha);

		/* Set connected N_Port d_id */
		if (vha->flags.nvme_enabled)
			fcport->fc4f_nvme = 1;

		fcport->scan_state = QLA_FCPORT_FOUND;
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		fcport->disc_state = DSC_GNL;
		fcport->n2n_flag = 1;
		fcport->flags = 3;
		vha->hw->flags.gpsc_supported = 0;

		if (greater_wwpn) {
			ql_dbg(ql_dbg_disc, vha, 0x20e5,
			    "%s %d PLOGI ELS %8phC\n",
			    __func__, __LINE__, fcport->port_name);

			res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    fcport, fcport->d_id);
		}

		if (res != QLA_SUCCESS) {
			ql_log(ql_log_info, vha, 0xd04d,
			    "PLOGI Failed: portid=%06x - retrying\n",
			    fcport->d_id.b24);
			/* Failure is absorbed; a retry happens via the
			 * normal discovery state machine. */
			res = QLA_SUCCESS;
		} else {
			/* State 0x6 means FCP PRLI complete */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
				    vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post NVMe PRLI\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			}
		}
	} else {
		/* Wait for next database change */
		set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
	}

	return res;
}

/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int		rval, rval2;
	int		found_devs;
	int		found;
	fc_port_t	*fcport, *new_fcport;

	uint16_t	index;
	uint16_t	entries;
	char		*id_iter;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices.
 */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    (uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info));

	/* Mark all ports as not-yet-seen; those still in SCAN state after
	 * the walk below are candidates for removal. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Initiate N2N login. */
	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
		rval = qla24xx_n2n_handle_login(vha, new_fcport);
		if (rval != QLA_SUCCESS)
			goto cleanup_allocation;
		return QLA_SUCCESS;
	}

	/* Add devices to port list. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Existing port: refresh its addressing info. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport.
			 * The lock is dropped around the GFP_KERNEL
			 * allocation (it may sleep) and re-taken after. */
			fcport = new_fcport;

			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Second pass: act on scan results — delete vanished sessions,
	 * kick off logins for found ports. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}

/*
 * Negotiate iIDMA (intelligent interleaved direct memory access) link speed
 * with the firmware for a single online fcport.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	/* Only adjust when the port speed is known and not above the
	 * HBA link rate. */
	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->port_name);
	}
}

/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Link the transport rport back to our fcport under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/* Roles are reported after registration via a role change. */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");

	fc_remote_port_rolechg(rport, rport_ids.roles);
}

/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0  - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	fcport->vha = vha;

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	if (IS_QLAFX00(vha->hw)) {
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		goto reg_port;
	}
	fcport->login_retry = 0;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->disc_state = DSC_LOGIN_COMPLETE;
	fcport->deleted = 0;
	fcport->logout_on_delete = 1;

	/* NVMe ports register with the FC-NVMe transport instead of the
	 * FC transport rport path below. */
	if (fcport->fc4f_nvme) {
		qla_nvme_register_remote(vha, fcport);
		return;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);

reg_port:
	/* NOTE(review): the MODE_TARGET/MODE_DUAL arms dereference
	 * vha->vha_tgt.qla_tgt without a NULL check — assumes target mode
	 * implies an allocated qla_tgt; confirm against init paths. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}
}

/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch: not an error, just nothing to configure. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;


	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}


	/* do { } while (0) gives the registration steps a common "break"
	 * target on failure. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen.
		 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = QLA_SUCCESS;
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
 */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		/* Each failed name-server query clears swl so the loop below
		 * falls back to one-at-a-time GA_NXT scanning. */
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				new_fcport->fc4f_nvme = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it.
		 * Ownership of new_fcport transfers to vp_fcports; a fresh
		 * scratch fcport is allocated below. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
		    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}

/*
 * qla2x00_find_new_loop_id
 *	Scan through our port list and find a new usable loop ID.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dev:	port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* loop_id_map is shared by all vports on this HBA; serialize. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
	    LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		/* Map exhausted, or first free slot is a reserved ID. */
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else
		set_bit(dev->loop_id, ha->loop_id_map);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return (rval);
}


/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 *
 * On return *next_loopid holds the loop ID the next login attempt
 * should start from.
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Keep retrying until firmware gives a terminal mailbox status. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bit 0: remote port acts as an initiator. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10]: class-of-service support bits. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 *  qla2x00_loop_resync
 *      Resync with fibre channel devices.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	req = vha->req;
	rsp = req->rsp;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, req, rsp, 0, 0,
						MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/*
				 * Loop again while LOOP_RESYNC_NEEDED was
				 * re-raised during configure, the link is
				 * still up, no ISP abort is pending and the
				 * retry budget is not exhausted.
				 */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * returm : Success or Failure
 *
 * Note: the parameter is named 'ha' but is a scsi_qla_host_t (vha),
 * not a qla_hw_data.
 */

int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync at a time; bail if one is already active. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/*
 * qla2x00_update_fcports
 *	Walk every vport on this HBA and delete the deferred rport
 *	reference (fcport->drport) of each fcport that is not
 *	FCS_UNCONFIGURED.  vport_slock is dropped around the actual
 *	rport deletion since qla2x00_rport_del may sleep/block.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport while the lock is temporarily dropped. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* Read presence/partition registers; access path differs on 8044. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* Each partition-info register packs eight 4-bit class types. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* Not in functions 0-7; scan functions 8-15. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}

/* Set this function's bit in the IDC driver-ack register (read-modify-write). */
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Clear this function's bit in the IDC driver-ack register. */
static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Map an IDC device-state code to a human-readable name for logging. */
static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA8XXX_DEV_COLD:
		return "COLD/RE-INIT";
	case QLA8XXX_DEV_INITIALIZING:
		return "INITIALIZING";
	case QLA8XXX_DEV_READY:
		return "READY";
	case QLA8XXX_DEV_NEED_RESET:
		return "NEED RESET";
	case QLA8XXX_DEV_NEED_QUIESCENT:
		return "NEED QUIESCENT";
	case QLA8XXX_DEV_FAILED:
		return "FAILED";
	case QLA8XXX_DEV_QUIESCENT:
		return "QUIESCENT";
	default:
		return "Unknown";
	}
}

/* Assumes
idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Record reset start time (seconds) in the audit register. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/* Record elapsed reset duration (seconds). */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}

/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		/*
		 * NOTE(review): unbounded poll; relies on the reset-owner
		 * eventually moving the state.  idc_lock is released around
		 * each sleep so the owner can make progress.
		 */
		while (dev_state == QLA8XXX_DEV_READY) {
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}

/* Write the IDC control register (caller holds idc_lock). */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

/* Read the IDC control register (caller holds idc_lock). */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

/*
 * Check whether this function's presence bit is still set in the IDC
 * drv-presence register, i.e. we still participate in IDC.
 */
static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
	uint32_t drv_presence = 0;
	struct qla_hw_data *ha = vha->hw;

	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (drv_presence & (1 << ha->portnum))
		return QLA_SUCCESS;
	else
		return QLA_TEST_FAILED;
}

/*
 * qla83xx_nic_core_reset
 *	Coordinate a NIC core reset through the IDC protocol:
 *	verify IDC participation, determine reset ownership, initiate
 *	the reset and run the IDC state handler.  Returns QLA_SUCCESS
 *	or a QLA_* failure code.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered  %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}

/*
 * qla2xxx_mctp_dump
 *	Capture an MCTP dump into a (lazily allocated, cached) DMA
 *	buffer, then — on function 0 only — restart the NIC firmware.
 *	Returns QLA_SUCCESS or a QLA_* failure code.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	if (!ha->mctp_dump) {
		/* Buffer is kept for the adapter's lifetime once allocated. */
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR 0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}

/*
* qla2x00_quiesce_io
* Description: This function will block the new I/Os
*              Its not aborting any I/Os as context
*              is not destroyed during quiescence
* Arguments: scsi_qla_host_t
* return : void
*/
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	/*
	 * NOTE(review): loop_down_timer/loop_state are addressed via 'ha'
	 * here but via 'vha' in qla2x00_abort_isp_cleanup() — confirm which
	 * structure actually owns these fields.
	 */
	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
}

/*
 * qla2x00_abort_isp_cleanup
 *	Pre-reset cleanup: take the adapter offline, reset the chip,
 *	bump the chip_reset generation on all queue pairs, mark all
 *	devices lost on every vport, clear async login state, and abort
 *	all outstanding commands.
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* New chip_reset generation invalidates in-flight srbs on all qpairs. */
	ha->base_qpair->chip_reset++;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin vport across the unlocked call. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}

/*
*  qla2x00_abort_isp
*      Resets ISP and aborts all outstanding commands.
*
* Input:
*      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t        status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently gone: nothing more can be done. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			if (ha->fce) {
				/* Re-arm FCE tracing after the reset. */
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			if (ha->eft) {
				/* Re-arm EFT tracing after the reset. */
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					/* Retry budget exhausted. */
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					ha->isp_ops->reset_adapter(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry counter. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		spin_lock_irqsave(&ha->vport_slock, flags);
		/* Propagate the abort/recovery to every virtual port. */
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		    __func__);
	}

	return(status);
}

/*
*  qla2x00_restart_isp
*      restarts the ISP after a reset
*
* Input:
*      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (!status)
			status = qla2x00_setup_chip(vha);
	}

	if (!status && !(status = qla2x00_init_rings(vha))) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		/* Initialize the queues in use */
		qla25xx_init_queues(ha);

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}
	return (status);
}

/*
 * qla25xx_init_queues
 *	Re-initialize every configured request/response queue beyond
 *	queue 0 after a chip reset.  Returns the status of the last
 *	queue initialized (-1 if no queue was configured).
 */
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
	struct rsp_que *rsp = NULL;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int ret = -1;
	int i;

	for (i = 1; i < ha->max_rsp_queues; i++) {
		rsp = ha->rsp_q_map[i];
		if (rsp && test_bit(i, ha->rsp_qid_map)) {
			rsp->options &= ~BIT_0;
			ret = qla25xx_init_rsp_que(base_vha, rsp);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
				    "%s Rsp que: %d init failed.\n",
				    __func__, rsp->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0100,
				    "%s Rsp que: %d inited.\n",
				    __func__, rsp->id);
		}
	}
	for (i = 1; i < ha->max_req_queues; i++) {
		req = ha->req_q_map[i];
		if (req && test_bit(i, ha->req_qid_map)) {
			/* Clear outstanding commands array. */
			req->options &= ~BIT_0;
			ret = qla25xx_init_req_que(base_vha, req);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x0101,
				    "%s Req que: %d init failed.\n",
				    __func__, req->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0102,
				    "%s Req que: %d inited.\n",
				    __func__, req->id);
		}
	}
	return ret;
}

/*
* qla2x00_reset_adapter
*      Reset adapter.
*
* Input:
*      ha = adapter block pointer.
*/
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset then release the RISC; read back to flush posted writes. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * qla24xx_reset_adapter
 *	ISP24xx variant: reset the RISC and release it paused.  No-op
 *	on P3P parts (handled elsewhere).
 */
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reads after each write flush PCI-posted register writes. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
6653 */ 6654 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 6655 struct nvram_24xx *nv) 6656 { 6657 #ifdef CONFIG_SPARC 6658 struct qla_hw_data *ha = vha->hw; 6659 struct pci_dev *pdev = ha->pdev; 6660 struct device_node *dp = pci_device_to_OF_node(pdev); 6661 const u8 *val; 6662 int len; 6663 6664 val = of_get_property(dp, "port-wwn", &len); 6665 if (val && len >= WWN_SIZE) 6666 memcpy(nv->port_name, val, WWN_SIZE); 6667 6668 val = of_get_property(dp, "node-wwn", &len); 6669 if (val && len >= WWN_SIZE) 6670 memcpy(nv->node_name, val, WWN_SIZE); 6671 #endif 6672 } 6673 6674 int 6675 qla24xx_nvram_config(scsi_qla_host_t *vha) 6676 { 6677 int rval; 6678 struct init_cb_24xx *icb; 6679 struct nvram_24xx *nv; 6680 uint32_t *dptr; 6681 uint8_t *dptr1, *dptr2; 6682 uint32_t chksum; 6683 uint16_t cnt; 6684 struct qla_hw_data *ha = vha->hw; 6685 6686 rval = QLA_SUCCESS; 6687 icb = (struct init_cb_24xx *)ha->init_cb; 6688 nv = ha->nvram; 6689 6690 /* Determine NVRAM starting address. */ 6691 if (ha->port_no == 0) { 6692 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 6693 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 6694 } else { 6695 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 6696 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 6697 } 6698 6699 ha->nvram_size = sizeof(struct nvram_24xx); 6700 ha->vpd_size = FA_NVRAM_VPD_SIZE; 6701 6702 /* Get VPD data into cache */ 6703 ha->vpd = ha->nvram + VPD_OFFSET; 6704 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 6705 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 6706 6707 /* Get NVRAM data into cache and calculate checksum. 
*/ 6708 dptr = (uint32_t *)nv; 6709 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 6710 ha->nvram_size); 6711 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 6712 chksum += le32_to_cpu(*dptr); 6713 6714 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, 6715 "Contents of NVRAM\n"); 6716 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, 6717 (uint8_t *)nv, ha->nvram_size); 6718 6719 /* Bad NVRAM data, set defaults parameters. */ 6720 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 6721 || nv->id[3] != ' ' || 6722 nv->nvram_version < cpu_to_le16(ICB_VERSION)) { 6723 /* Reset NVRAM data. */ 6724 ql_log(ql_log_warn, vha, 0x006b, 6725 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 6726 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 6727 ql_log(ql_log_warn, vha, 0x006c, 6728 "Falling back to functioning (yet invalid -- WWPN) " 6729 "defaults.\n"); 6730 6731 /* 6732 * Set default initialization control block. 6733 */ 6734 memset(nv, 0, ha->nvram_size); 6735 nv->nvram_version = cpu_to_le16(ICB_VERSION); 6736 nv->version = cpu_to_le16(ICB_VERSION); 6737 nv->frame_payload_size = 2048; 6738 nv->execution_throttle = cpu_to_le16(0xFFFF); 6739 nv->exchange_count = cpu_to_le16(0); 6740 nv->hard_address = cpu_to_le16(124); 6741 nv->port_name[0] = 0x21; 6742 nv->port_name[1] = 0x00 + ha->port_no + 1; 6743 nv->port_name[2] = 0x00; 6744 nv->port_name[3] = 0xe0; 6745 nv->port_name[4] = 0x8b; 6746 nv->port_name[5] = 0x1c; 6747 nv->port_name[6] = 0x55; 6748 nv->port_name[7] = 0x86; 6749 nv->node_name[0] = 0x20; 6750 nv->node_name[1] = 0x00; 6751 nv->node_name[2] = 0x00; 6752 nv->node_name[3] = 0xe0; 6753 nv->node_name[4] = 0x8b; 6754 nv->node_name[5] = 0x1c; 6755 nv->node_name[6] = 0x55; 6756 nv->node_name[7] = 0x86; 6757 qla24xx_nvram_wwn_from_ofw(vha, nv); 6758 nv->login_retry_count = cpu_to_le16(8); 6759 nv->interrupt_delay_timer = cpu_to_le16(0); 6760 nv->login_timeout = cpu_to_le16(0); 6761 
nv->firmware_options_1 = 6762 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 6763 nv->firmware_options_2 = cpu_to_le32(2 << 4); 6764 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6765 nv->firmware_options_3 = cpu_to_le32(2 << 13); 6766 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 6767 nv->efi_parameters = cpu_to_le32(0); 6768 nv->reset_delay = 5; 6769 nv->max_luns_per_target = cpu_to_le16(128); 6770 nv->port_down_retry_count = cpu_to_le16(30); 6771 nv->link_down_timeout = cpu_to_le16(30); 6772 6773 rval = 1; 6774 } 6775 6776 if (qla_tgt_mode_enabled(vha)) { 6777 /* Don't enable full login after initial LIP */ 6778 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6779 /* Don't enable LIP full login for initiator */ 6780 nv->host_p &= cpu_to_le32(~BIT_10); 6781 } 6782 6783 qlt_24xx_config_nvram_stage1(vha, nv); 6784 6785 /* Reset Initialization control block */ 6786 memset(icb, 0, ha->init_cb_size); 6787 6788 /* Copy 1st segment. */ 6789 dptr1 = (uint8_t *)icb; 6790 dptr2 = (uint8_t *)&nv->version; 6791 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 6792 while (cnt--) 6793 *dptr1++ = *dptr2++; 6794 6795 icb->login_retry_count = nv->login_retry_count; 6796 icb->link_down_on_nos = nv->link_down_on_nos; 6797 6798 /* Copy 2nd segment. */ 6799 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 6800 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 6801 cnt = (uint8_t *)&icb->reserved_3 - 6802 (uint8_t *)&icb->interrupt_delay_timer; 6803 while (cnt--) 6804 *dptr1++ = *dptr2++; 6805 6806 /* 6807 * Setup driver NVRAM options. 6808 */ 6809 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 6810 "QLA2462"); 6811 6812 qlt_24xx_config_nvram_stage2(vha, icb); 6813 6814 if (nv->host_p & cpu_to_le32(BIT_15)) { 6815 /* Use alternate WWN? 
*/ 6816 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 6817 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 6818 } 6819 6820 /* Prepare nodename */ 6821 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 6822 /* 6823 * Firmware will apply the following mask if the nodename was 6824 * not provided. 6825 */ 6826 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 6827 icb->node_name[0] &= 0xF0; 6828 } 6829 6830 /* Set host adapter parameters. */ 6831 ha->flags.disable_risc_code_load = 0; 6832 ha->flags.enable_lip_reset = 0; 6833 ha->flags.enable_lip_full_login = 6834 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 6835 ha->flags.enable_target_reset = 6836 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 6837 ha->flags.enable_led_scheme = 0; 6838 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 6839 6840 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 6841 (BIT_6 | BIT_5 | BIT_4)) >> 4; 6842 6843 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 6844 sizeof(ha->fw_seriallink_options24)); 6845 6846 /* save HBA serial number */ 6847 ha->serial0 = icb->port_name[5]; 6848 ha->serial1 = icb->port_name[6]; 6849 ha->serial2 = icb->port_name[7]; 6850 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 6851 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 6852 6853 icb->execution_throttle = cpu_to_le16(0xFFFF); 6854 6855 ha->retry_count = le16_to_cpu(nv->login_retry_count); 6856 6857 /* Set minimum login_timeout to 4 seconds. */ 6858 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 6859 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 6860 if (le16_to_cpu(nv->login_timeout) < 4) 6861 nv->login_timeout = cpu_to_le16(4); 6862 ha->login_timeout = le16_to_cpu(nv->login_timeout); 6863 6864 /* Set minimum RATOV to 100 tenths of a second. 
*/ 6865 ha->r_a_tov = 100; 6866 6867 ha->loop_reset_delay = nv->reset_delay; 6868 6869 /* Link Down Timeout = 0: 6870 * 6871 * When Port Down timer expires we will start returning 6872 * I/O's to OS with "DID_NO_CONNECT". 6873 * 6874 * Link Down Timeout != 0: 6875 * 6876 * The driver waits for the link to come up after link down 6877 * before returning I/Os to OS with "DID_NO_CONNECT". 6878 */ 6879 if (le16_to_cpu(nv->link_down_timeout) == 0) { 6880 ha->loop_down_abort_time = 6881 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 6882 } else { 6883 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 6884 ha->loop_down_abort_time = 6885 (LOOP_DOWN_TIME - ha->link_down_timeout); 6886 } 6887 6888 /* Need enough time to try and get the port back. */ 6889 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 6890 if (qlport_down_retry) 6891 ha->port_down_retry_count = qlport_down_retry; 6892 6893 /* Set login_retry_count */ 6894 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 6895 if (ha->port_down_retry_count == 6896 le16_to_cpu(nv->port_down_retry_count) && 6897 ha->port_down_retry_count > 3) 6898 ha->login_retry_count = ha->port_down_retry_count; 6899 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 6900 ha->login_retry_count = ha->port_down_retry_count; 6901 if (ql2xloginretrycount) 6902 ha->login_retry_count = ql2xloginretrycount; 6903 6904 /* Enable ZIO. */ 6905 if (!vha->flags.init_done) { 6906 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 6907 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 6908 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
6909 le16_to_cpu(icb->interrupt_delay_timer): 2; 6910 } 6911 icb->firmware_options_2 &= cpu_to_le32( 6912 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 6913 vha->flags.process_response_queue = 0; 6914 if (ha->zio_mode != QLA_ZIO_DISABLED) { 6915 ha->zio_mode = QLA_ZIO_MODE_6; 6916 6917 ql_log(ql_log_info, vha, 0x006f, 6918 "ZIO mode %d enabled; timer delay (%d us).\n", 6919 ha->zio_mode, ha->zio_timer * 100); 6920 6921 icb->firmware_options_2 |= cpu_to_le32( 6922 (uint32_t)ha->zio_mode); 6923 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 6924 vha->flags.process_response_queue = 1; 6925 } 6926 6927 if (rval) { 6928 ql_log(ql_log_warn, vha, 0x0070, 6929 "NVRAM configuration failed.\n"); 6930 } 6931 return (rval); 6932 } 6933 6934 uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) 6935 { 6936 struct qla27xx_image_status pri_image_status, sec_image_status; 6937 uint8_t valid_pri_image, valid_sec_image; 6938 uint32_t *wptr; 6939 uint32_t cnt, chksum, size; 6940 struct qla_hw_data *ha = vha->hw; 6941 6942 valid_pri_image = valid_sec_image = 1; 6943 ha->active_image = 0; 6944 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t); 6945 6946 if (!ha->flt_region_img_status_pri) { 6947 valid_pri_image = 0; 6948 goto check_sec_image; 6949 } 6950 6951 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status), 6952 ha->flt_region_img_status_pri, size); 6953 6954 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { 6955 ql_dbg(ql_dbg_init, vha, 0x018b, 6956 "Primary image signature (0x%x) not valid\n", 6957 pri_image_status.signature); 6958 valid_pri_image = 0; 6959 goto check_sec_image; 6960 } 6961 6962 wptr = (uint32_t *)(&pri_image_status); 6963 cnt = size; 6964 6965 for (chksum = 0; cnt--; wptr++) 6966 chksum += le32_to_cpu(*wptr); 6967 6968 if (chksum) { 6969 ql_dbg(ql_dbg_init, vha, 0x018c, 6970 "Checksum validation failed for primary image (0x%x)\n", 6971 chksum); 6972 valid_pri_image = 0; 6973 } 6974 6975 check_sec_image: 6976 if 
(!ha->flt_region_img_status_sec) { 6977 valid_sec_image = 0; 6978 goto check_valid_image; 6979 } 6980 6981 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 6982 ha->flt_region_img_status_sec, size); 6983 6984 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { 6985 ql_dbg(ql_dbg_init, vha, 0x018d, 6986 "Secondary image signature(0x%x) not valid\n", 6987 sec_image_status.signature); 6988 valid_sec_image = 0; 6989 goto check_valid_image; 6990 } 6991 6992 wptr = (uint32_t *)(&sec_image_status); 6993 cnt = size; 6994 for (chksum = 0; cnt--; wptr++) 6995 chksum += le32_to_cpu(*wptr); 6996 if (chksum) { 6997 ql_dbg(ql_dbg_init, vha, 0x018e, 6998 "Checksum validation failed for secondary image (0x%x)\n", 6999 chksum); 7000 valid_sec_image = 0; 7001 } 7002 7003 check_valid_image: 7004 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1)) 7005 ha->active_image = QLA27XX_PRIMARY_IMAGE; 7006 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) { 7007 if (!ha->active_image || 7008 pri_image_status.generation_number < 7009 sec_image_status.generation_number) 7010 ha->active_image = QLA27XX_SECONDARY_IMAGE; 7011 } 7012 7013 ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n", 7014 ha->active_image == 0 ? "default bootld and fw" : 7015 ha->active_image == 1 ? "primary" : 7016 ha->active_image == 2 ? 
"secondary" : 7017 "Invalid"); 7018 7019 return ha->active_image; 7020 } 7021 7022 static int 7023 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 7024 uint32_t faddr) 7025 { 7026 int rval = QLA_SUCCESS; 7027 int segments, fragment; 7028 uint32_t *dcode, dlen; 7029 uint32_t risc_addr; 7030 uint32_t risc_size; 7031 uint32_t i; 7032 struct qla_hw_data *ha = vha->hw; 7033 struct req_que *req = ha->req_q_map[0]; 7034 7035 ql_dbg(ql_dbg_init, vha, 0x008b, 7036 "FW: Loading firmware from flash (%x).\n", faddr); 7037 7038 rval = QLA_SUCCESS; 7039 7040 segments = FA_RISC_CODE_SEGMENTS; 7041 dcode = (uint32_t *)req->ring; 7042 *srisc_addr = 0; 7043 7044 if (IS_QLA27XX(ha) && 7045 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) 7046 faddr = ha->flt_region_fw_sec; 7047 7048 /* Validate firmware image by checking version. */ 7049 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); 7050 for (i = 0; i < 4; i++) 7051 dcode[i] = be32_to_cpu(dcode[i]); 7052 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 7053 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 7054 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 7055 dcode[3] == 0)) { 7056 ql_log(ql_log_fatal, vha, 0x008c, 7057 "Unable to verify the integrity of flash firmware " 7058 "image.\n"); 7059 ql_log(ql_log_fatal, vha, 0x008d, 7060 "Firmware data: %08x %08x %08x %08x.\n", 7061 dcode[0], dcode[1], dcode[2], dcode[3]); 7062 7063 return QLA_FUNCTION_FAILED; 7064 } 7065 7066 while (segments && rval == QLA_SUCCESS) { 7067 /* Read segment's load information. */ 7068 qla24xx_read_flash_data(vha, dcode, faddr, 4); 7069 7070 risc_addr = be32_to_cpu(dcode[2]); 7071 *srisc_addr = *srisc_addr == 0 ? 
risc_addr : *srisc_addr; 7072 risc_size = be32_to_cpu(dcode[3]); 7073 7074 fragment = 0; 7075 while (risc_size > 0 && rval == QLA_SUCCESS) { 7076 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 7077 if (dlen > risc_size) 7078 dlen = risc_size; 7079 7080 ql_dbg(ql_dbg_init, vha, 0x008e, 7081 "Loading risc segment@ risc addr %x " 7082 "number of dwords 0x%x offset 0x%x.\n", 7083 risc_addr, dlen, faddr); 7084 7085 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 7086 for (i = 0; i < dlen; i++) 7087 dcode[i] = swab32(dcode[i]); 7088 7089 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 7090 dlen); 7091 if (rval) { 7092 ql_log(ql_log_fatal, vha, 0x008f, 7093 "Failed to load segment %d of firmware.\n", 7094 fragment); 7095 return QLA_FUNCTION_FAILED; 7096 } 7097 7098 faddr += dlen; 7099 risc_addr += dlen; 7100 risc_size -= dlen; 7101 fragment++; 7102 } 7103 7104 /* Next segment. */ 7105 segments--; 7106 } 7107 7108 if (!IS_QLA27XX(ha)) 7109 return rval; 7110 7111 if (ha->fw_dump_template) 7112 vfree(ha->fw_dump_template); 7113 ha->fw_dump_template = NULL; 7114 ha->fw_dump_template_len = 0; 7115 7116 ql_dbg(ql_dbg_init, vha, 0x0161, 7117 "Loading fwdump template from %x\n", faddr); 7118 qla24xx_read_flash_data(vha, dcode, faddr, 7); 7119 risc_size = be32_to_cpu(dcode[2]); 7120 ql_dbg(ql_dbg_init, vha, 0x0162, 7121 "-> array size %x dwords\n", risc_size); 7122 if (risc_size == 0 || risc_size == ~0) 7123 goto default_template; 7124 7125 dlen = (risc_size - 8) * sizeof(*dcode); 7126 ql_dbg(ql_dbg_init, vha, 0x0163, 7127 "-> template allocating %x bytes...\n", dlen); 7128 ha->fw_dump_template = vmalloc(dlen); 7129 if (!ha->fw_dump_template) { 7130 ql_log(ql_log_warn, vha, 0x0164, 7131 "Failed fwdump template allocate %x bytes.\n", risc_size); 7132 goto default_template; 7133 } 7134 7135 faddr += 7; 7136 risc_size -= 8; 7137 dcode = ha->fw_dump_template; 7138 qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 7139 for (i = 0; i < risc_size; i++) 7140 dcode[i] = 
le32_to_cpu(dcode[i]); 7141 7142 if (!qla27xx_fwdt_template_valid(dcode)) { 7143 ql_log(ql_log_warn, vha, 0x0165, 7144 "Failed fwdump template validate\n"); 7145 goto default_template; 7146 } 7147 7148 dlen = qla27xx_fwdt_template_size(dcode); 7149 ql_dbg(ql_dbg_init, vha, 0x0166, 7150 "-> template size %x bytes\n", dlen); 7151 if (dlen > risc_size * sizeof(*dcode)) { 7152 ql_log(ql_log_warn, vha, 0x0167, 7153 "Failed fwdump template exceeds array by %zx bytes\n", 7154 (size_t)(dlen - risc_size * sizeof(*dcode))); 7155 goto default_template; 7156 } 7157 ha->fw_dump_template_len = dlen; 7158 return rval; 7159 7160 default_template: 7161 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n"); 7162 if (ha->fw_dump_template) 7163 vfree(ha->fw_dump_template); 7164 ha->fw_dump_template = NULL; 7165 ha->fw_dump_template_len = 0; 7166 7167 dlen = qla27xx_fwdt_template_default_size(); 7168 ql_dbg(ql_dbg_init, vha, 0x0169, 7169 "-> template allocating %x bytes...\n", dlen); 7170 ha->fw_dump_template = vmalloc(dlen); 7171 if (!ha->fw_dump_template) { 7172 ql_log(ql_log_warn, vha, 0x016a, 7173 "Failed fwdump template allocate %x bytes.\n", risc_size); 7174 goto failed_template; 7175 } 7176 7177 dcode = ha->fw_dump_template; 7178 risc_size = dlen / sizeof(*dcode); 7179 memcpy(dcode, qla27xx_fwdt_template_default(), dlen); 7180 for (i = 0; i < risc_size; i++) 7181 dcode[i] = be32_to_cpu(dcode[i]); 7182 7183 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { 7184 ql_log(ql_log_warn, vha, 0x016b, 7185 "Failed fwdump template validate\n"); 7186 goto failed_template; 7187 } 7188 7189 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); 7190 ql_dbg(ql_dbg_init, vha, 0x016c, 7191 "-> template size %x bytes\n", dlen); 7192 ha->fw_dump_template_len = dlen; 7193 return rval; 7194 7195 failed_template: 7196 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n"); 7197 if (ha->fw_dump_template) 7198 vfree(ha->fw_dump_template); 7199 ha->fw_dump_template 
= NULL; 7200 ha->fw_dump_template_len = 0; 7201 return rval; 7202 } 7203 7204 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" 7205 7206 int 7207 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 7208 { 7209 int rval; 7210 int i, fragment; 7211 uint16_t *wcode, *fwcode; 7212 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 7213 struct fw_blob *blob; 7214 struct qla_hw_data *ha = vha->hw; 7215 struct req_que *req = ha->req_q_map[0]; 7216 7217 /* Load firmware blob. */ 7218 blob = qla2x00_request_firmware(vha); 7219 if (!blob) { 7220 ql_log(ql_log_info, vha, 0x0083, 7221 "Firmware image unavailable.\n"); 7222 ql_log(ql_log_info, vha, 0x0084, 7223 "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); 7224 return QLA_FUNCTION_FAILED; 7225 } 7226 7227 rval = QLA_SUCCESS; 7228 7229 wcode = (uint16_t *)req->ring; 7230 *srisc_addr = 0; 7231 fwcode = (uint16_t *)blob->fw->data; 7232 fwclen = 0; 7233 7234 /* Validate firmware image by checking version. */ 7235 if (blob->fw->size < 8 * sizeof(uint16_t)) { 7236 ql_log(ql_log_fatal, vha, 0x0085, 7237 "Unable to verify integrity of firmware image (%zd).\n", 7238 blob->fw->size); 7239 goto fail_fw_integrity; 7240 } 7241 for (i = 0; i < 4; i++) 7242 wcode[i] = be16_to_cpu(fwcode[i + 4]); 7243 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 7244 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 7245 wcode[2] == 0 && wcode[3] == 0)) { 7246 ql_log(ql_log_fatal, vha, 0x0086, 7247 "Unable to verify integrity of firmware image.\n"); 7248 ql_log(ql_log_fatal, vha, 0x0087, 7249 "Firmware data: %04x %04x %04x %04x.\n", 7250 wcode[0], wcode[1], wcode[2], wcode[3]); 7251 goto fail_fw_integrity; 7252 } 7253 7254 seg = blob->segs; 7255 while (*seg && rval == QLA_SUCCESS) { 7256 risc_addr = *seg; 7257 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; 7258 risc_size = be16_to_cpu(fwcode[3]); 7259 7260 /* Validate firmware image size. 
*/ 7261 fwclen += risc_size * sizeof(uint16_t); 7262 if (blob->fw->size < fwclen) { 7263 ql_log(ql_log_fatal, vha, 0x0088, 7264 "Unable to verify integrity of firmware image " 7265 "(%zd).\n", blob->fw->size); 7266 goto fail_fw_integrity; 7267 } 7268 7269 fragment = 0; 7270 while (risc_size > 0 && rval == QLA_SUCCESS) { 7271 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 7272 if (wlen > risc_size) 7273 wlen = risc_size; 7274 ql_dbg(ql_dbg_init, vha, 0x0089, 7275 "Loading risc segment@ risc addr %x number of " 7276 "words 0x%x.\n", risc_addr, wlen); 7277 7278 for (i = 0; i < wlen; i++) 7279 wcode[i] = swab16(fwcode[i]); 7280 7281 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 7282 wlen); 7283 if (rval) { 7284 ql_log(ql_log_fatal, vha, 0x008a, 7285 "Failed to load segment %d of firmware.\n", 7286 fragment); 7287 break; 7288 } 7289 7290 fwcode += wlen; 7291 risc_addr += wlen; 7292 risc_size -= wlen; 7293 fragment++; 7294 } 7295 7296 /* Next segment. */ 7297 seg++; 7298 } 7299 return rval; 7300 7301 fail_fw_integrity: 7302 return QLA_FUNCTION_FAILED; 7303 } 7304 7305 static int 7306 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) 7307 { 7308 int rval; 7309 int segments, fragment; 7310 uint32_t *dcode, dlen; 7311 uint32_t risc_addr; 7312 uint32_t risc_size; 7313 uint32_t i; 7314 struct fw_blob *blob; 7315 const uint32_t *fwcode; 7316 uint32_t fwclen; 7317 struct qla_hw_data *ha = vha->hw; 7318 struct req_que *req = ha->req_q_map[0]; 7319 7320 /* Load firmware blob. 
*/ 7321 blob = qla2x00_request_firmware(vha); 7322 if (!blob) { 7323 ql_log(ql_log_warn, vha, 0x0090, 7324 "Firmware image unavailable.\n"); 7325 ql_log(ql_log_warn, vha, 0x0091, 7326 "Firmware images can be retrieved from: " 7327 QLA_FW_URL ".\n"); 7328 7329 return QLA_FUNCTION_FAILED; 7330 } 7331 7332 ql_dbg(ql_dbg_init, vha, 0x0092, 7333 "FW: Loading via request-firmware.\n"); 7334 7335 rval = QLA_SUCCESS; 7336 7337 segments = FA_RISC_CODE_SEGMENTS; 7338 dcode = (uint32_t *)req->ring; 7339 *srisc_addr = 0; 7340 fwcode = (uint32_t *)blob->fw->data; 7341 fwclen = 0; 7342 7343 /* Validate firmware image by checking version. */ 7344 if (blob->fw->size < 8 * sizeof(uint32_t)) { 7345 ql_log(ql_log_fatal, vha, 0x0093, 7346 "Unable to verify integrity of firmware image (%zd).\n", 7347 blob->fw->size); 7348 return QLA_FUNCTION_FAILED; 7349 } 7350 for (i = 0; i < 4; i++) 7351 dcode[i] = be32_to_cpu(fwcode[i + 4]); 7352 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 7353 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 7354 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 7355 dcode[3] == 0)) { 7356 ql_log(ql_log_fatal, vha, 0x0094, 7357 "Unable to verify integrity of firmware image (%zd).\n", 7358 blob->fw->size); 7359 ql_log(ql_log_fatal, vha, 0x0095, 7360 "Firmware data: %08x %08x %08x %08x.\n", 7361 dcode[0], dcode[1], dcode[2], dcode[3]); 7362 return QLA_FUNCTION_FAILED; 7363 } 7364 7365 while (segments && rval == QLA_SUCCESS) { 7366 risc_addr = be32_to_cpu(fwcode[2]); 7367 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 7368 risc_size = be32_to_cpu(fwcode[3]); 7369 7370 /* Validate firmware image size. 
*/ 7371 fwclen += risc_size * sizeof(uint32_t); 7372 if (blob->fw->size < fwclen) { 7373 ql_log(ql_log_fatal, vha, 0x0096, 7374 "Unable to verify integrity of firmware image " 7375 "(%zd).\n", blob->fw->size); 7376 return QLA_FUNCTION_FAILED; 7377 } 7378 7379 fragment = 0; 7380 while (risc_size > 0 && rval == QLA_SUCCESS) { 7381 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 7382 if (dlen > risc_size) 7383 dlen = risc_size; 7384 7385 ql_dbg(ql_dbg_init, vha, 0x0097, 7386 "Loading risc segment@ risc addr %x " 7387 "number of dwords 0x%x.\n", risc_addr, dlen); 7388 7389 for (i = 0; i < dlen; i++) 7390 dcode[i] = swab32(fwcode[i]); 7391 7392 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 7393 dlen); 7394 if (rval) { 7395 ql_log(ql_log_fatal, vha, 0x0098, 7396 "Failed to load segment %d of firmware.\n", 7397 fragment); 7398 return QLA_FUNCTION_FAILED; 7399 } 7400 7401 fwcode += dlen; 7402 risc_addr += dlen; 7403 risc_size -= dlen; 7404 fragment++; 7405 } 7406 7407 /* Next segment. */ 7408 segments--; 7409 } 7410 7411 if (!IS_QLA27XX(ha)) 7412 return rval; 7413 7414 if (ha->fw_dump_template) 7415 vfree(ha->fw_dump_template); 7416 ha->fw_dump_template = NULL; 7417 ha->fw_dump_template_len = 0; 7418 7419 ql_dbg(ql_dbg_init, vha, 0x171, 7420 "Loading fwdump template from %x\n", 7421 (uint32_t)((void *)fwcode - (void *)blob->fw->data)); 7422 risc_size = be32_to_cpu(fwcode[2]); 7423 ql_dbg(ql_dbg_init, vha, 0x172, 7424 "-> array size %x dwords\n", risc_size); 7425 if (risc_size == 0 || risc_size == ~0) 7426 goto default_template; 7427 7428 dlen = (risc_size - 8) * sizeof(*fwcode); 7429 ql_dbg(ql_dbg_init, vha, 0x0173, 7430 "-> template allocating %x bytes...\n", dlen); 7431 ha->fw_dump_template = vmalloc(dlen); 7432 if (!ha->fw_dump_template) { 7433 ql_log(ql_log_warn, vha, 0x0174, 7434 "Failed fwdump template allocate %x bytes.\n", risc_size); 7435 goto default_template; 7436 } 7437 7438 fwcode += 7; 7439 risc_size -= 8; 7440 dcode = ha->fw_dump_template; 7441 for (i = 0; 
i < risc_size; i++) 7442 dcode[i] = le32_to_cpu(fwcode[i]); 7443 7444 if (!qla27xx_fwdt_template_valid(dcode)) { 7445 ql_log(ql_log_warn, vha, 0x0175, 7446 "Failed fwdump template validate\n"); 7447 goto default_template; 7448 } 7449 7450 dlen = qla27xx_fwdt_template_size(dcode); 7451 ql_dbg(ql_dbg_init, vha, 0x0176, 7452 "-> template size %x bytes\n", dlen); 7453 if (dlen > risc_size * sizeof(*fwcode)) { 7454 ql_log(ql_log_warn, vha, 0x0177, 7455 "Failed fwdump template exceeds array by %zx bytes\n", 7456 (size_t)(dlen - risc_size * sizeof(*fwcode))); 7457 goto default_template; 7458 } 7459 ha->fw_dump_template_len = dlen; 7460 return rval; 7461 7462 default_template: 7463 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n"); 7464 if (ha->fw_dump_template) 7465 vfree(ha->fw_dump_template); 7466 ha->fw_dump_template = NULL; 7467 ha->fw_dump_template_len = 0; 7468 7469 dlen = qla27xx_fwdt_template_default_size(); 7470 ql_dbg(ql_dbg_init, vha, 0x0179, 7471 "-> template allocating %x bytes...\n", dlen); 7472 ha->fw_dump_template = vmalloc(dlen); 7473 if (!ha->fw_dump_template) { 7474 ql_log(ql_log_warn, vha, 0x017a, 7475 "Failed fwdump template allocate %x bytes.\n", risc_size); 7476 goto failed_template; 7477 } 7478 7479 dcode = ha->fw_dump_template; 7480 risc_size = dlen / sizeof(*fwcode); 7481 fwcode = qla27xx_fwdt_template_default(); 7482 for (i = 0; i < risc_size; i++) 7483 dcode[i] = be32_to_cpu(fwcode[i]); 7484 7485 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { 7486 ql_log(ql_log_warn, vha, 0x017b, 7487 "Failed fwdump template validate\n"); 7488 goto failed_template; 7489 } 7490 7491 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); 7492 ql_dbg(ql_dbg_init, vha, 0x017c, 7493 "-> template size %x bytes\n", dlen); 7494 ha->fw_dump_template_len = dlen; 7495 return rval; 7496 7497 failed_template: 7498 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n"); 7499 if (ha->fw_dump_template) 7500 
vfree(ha->fw_dump_template); 7501 ha->fw_dump_template = NULL; 7502 ha->fw_dump_template_len = 0; 7503 return rval; 7504 } 7505 7506 int 7507 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 7508 { 7509 int rval; 7510 7511 if (ql2xfwloadbin == 1) 7512 return qla81xx_load_risc(vha, srisc_addr); 7513 7514 /* 7515 * FW Load priority: 7516 * 1) Firmware via request-firmware interface (.bin file). 7517 * 2) Firmware residing in flash. 7518 */ 7519 rval = qla24xx_load_risc_blob(vha, srisc_addr); 7520 if (rval == QLA_SUCCESS) 7521 return rval; 7522 7523 return qla24xx_load_risc_flash(vha, srisc_addr, 7524 vha->hw->flt_region_fw); 7525 } 7526 7527 int 7528 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 7529 { 7530 int rval; 7531 struct qla_hw_data *ha = vha->hw; 7532 7533 if (ql2xfwloadbin == 2) 7534 goto try_blob_fw; 7535 7536 /* 7537 * FW Load priority: 7538 * 1) Firmware residing in flash. 7539 * 2) Firmware via request-firmware interface (.bin file). 7540 * 3) Golden-Firmware residing in flash -- limited operation. 
7541 */ 7542 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); 7543 if (rval == QLA_SUCCESS) 7544 return rval; 7545 7546 try_blob_fw: 7547 rval = qla24xx_load_risc_blob(vha, srisc_addr); 7548 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 7549 return rval; 7550 7551 ql_log(ql_log_info, vha, 0x0099, 7552 "Attempting to fallback to golden firmware.\n"); 7553 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 7554 if (rval != QLA_SUCCESS) 7555 return rval; 7556 7557 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); 7558 ha->flags.running_gold_fw = 1; 7559 return rval; 7560 } 7561 7562 void 7563 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 7564 { 7565 int ret, retries; 7566 struct qla_hw_data *ha = vha->hw; 7567 7568 if (ha->flags.pci_channel_io_perm_failure) 7569 return; 7570 if (!IS_FWI2_CAPABLE(ha)) 7571 return; 7572 if (!ha->fw_major_version) 7573 return; 7574 if (!ha->flags.fw_started) 7575 return; 7576 7577 ret = qla2x00_stop_firmware(vha); 7578 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 7579 ret != QLA_INVALID_COMMAND && retries ; retries--) { 7580 ha->isp_ops->reset_chip(vha); 7581 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 7582 continue; 7583 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 7584 continue; 7585 ql_log(ql_log_info, vha, 0x8015, 7586 "Attempting retry of stop-firmware command.\n"); 7587 ret = qla2x00_stop_firmware(vha); 7588 } 7589 7590 QLA_FW_STOPPED(ha); 7591 ha->flags.fw_init_done = 0; 7592 } 7593 7594 int 7595 qla24xx_configure_vhba(scsi_qla_host_t *vha) 7596 { 7597 int rval = QLA_SUCCESS; 7598 int rval2; 7599 uint16_t mb[MAILBOX_REGISTER_COUNT]; 7600 struct qla_hw_data *ha = vha->hw; 7601 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 7602 struct req_que *req; 7603 struct rsp_que *rsp; 7604 7605 if (!vha->vp_idx) 7606 return -EINVAL; 7607 7608 rval = qla2x00_fw_ready(base_vha); 7609 if (vha->qpair) 7610 req = vha->qpair->req; 7611 
else 7612 req = ha->req_q_map[0]; 7613 rsp = req->rsp; 7614 7615 if (rval == QLA_SUCCESS) { 7616 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7617 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 7618 } 7619 7620 vha->flags.management_server_logged_in = 0; 7621 7622 /* Login to SNS first */ 7623 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, 7624 BIT_1); 7625 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 7626 if (rval2 == QLA_MEMORY_ALLOC_FAILED) 7627 ql_dbg(ql_dbg_init, vha, 0x0120, 7628 "Failed SNS login: loop_id=%x, rval2=%d\n", 7629 NPH_SNS, rval2); 7630 else 7631 ql_dbg(ql_dbg_init, vha, 0x0103, 7632 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 7633 "mb[2]=%x mb[6]=%x mb[7]=%x.\n", 7634 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); 7635 return (QLA_FUNCTION_FAILED); 7636 } 7637 7638 atomic_set(&vha->loop_down_timer, 0); 7639 atomic_set(&vha->loop_state, LOOP_UP); 7640 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 7641 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 7642 rval = qla2x00_loop_resync(base_vha); 7643 7644 return rval; 7645 } 7646 7647 /* 84XX Support **************************************************************/ 7648 7649 static LIST_HEAD(qla_cs84xx_list); 7650 static DEFINE_MUTEX(qla_cs84xx_mutex); 7651 7652 static struct qla_chip_state_84xx * 7653 qla84xx_get_chip(struct scsi_qla_host *vha) 7654 { 7655 struct qla_chip_state_84xx *cs84xx; 7656 struct qla_hw_data *ha = vha->hw; 7657 7658 mutex_lock(&qla_cs84xx_mutex); 7659 7660 /* Find any shared 84xx chip. 
*/ 7661 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { 7662 if (cs84xx->bus == ha->pdev->bus) { 7663 kref_get(&cs84xx->kref); 7664 goto done; 7665 } 7666 } 7667 7668 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); 7669 if (!cs84xx) 7670 goto done; 7671 7672 kref_init(&cs84xx->kref); 7673 spin_lock_init(&cs84xx->access_lock); 7674 mutex_init(&cs84xx->fw_update_mutex); 7675 cs84xx->bus = ha->pdev->bus; 7676 7677 list_add_tail(&cs84xx->list, &qla_cs84xx_list); 7678 done: 7679 mutex_unlock(&qla_cs84xx_mutex); 7680 return cs84xx; 7681 } 7682 7683 static void 7684 __qla84xx_chip_release(struct kref *kref) 7685 { 7686 struct qla_chip_state_84xx *cs84xx = 7687 container_of(kref, struct qla_chip_state_84xx, kref); 7688 7689 mutex_lock(&qla_cs84xx_mutex); 7690 list_del(&cs84xx->list); 7691 mutex_unlock(&qla_cs84xx_mutex); 7692 kfree(cs84xx); 7693 } 7694 7695 void 7696 qla84xx_put_chip(struct scsi_qla_host *vha) 7697 { 7698 struct qla_hw_data *ha = vha->hw; 7699 if (ha->cs84xx) 7700 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 7701 } 7702 7703 static int 7704 qla84xx_init_chip(scsi_qla_host_t *vha) 7705 { 7706 int rval; 7707 uint16_t status[2]; 7708 struct qla_hw_data *ha = vha->hw; 7709 7710 mutex_lock(&ha->cs84xx->fw_update_mutex); 7711 7712 rval = qla84xx_verify_chip(vha, status); 7713 7714 mutex_unlock(&ha->cs84xx->fw_update_mutex); 7715 7716 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 7717 QLA_SUCCESS; 7718 } 7719 7720 /* 81XX Support **************************************************************/ 7721 7722 int 7723 qla81xx_nvram_config(scsi_qla_host_t *vha) 7724 { 7725 int rval; 7726 struct init_cb_81xx *icb; 7727 struct nvram_81xx *nv; 7728 uint32_t *dptr; 7729 uint8_t *dptr1, *dptr2; 7730 uint32_t chksum; 7731 uint16_t cnt; 7732 struct qla_hw_data *ha = vha->hw; 7733 7734 rval = QLA_SUCCESS; 7735 icb = (struct init_cb_81xx *)ha->init_cb; 7736 nv = ha->nvram; 7737 7738 /* Determine NVRAM starting address. 
*/ 7739 ha->nvram_size = sizeof(struct nvram_81xx); 7740 ha->vpd_size = FA_NVRAM_VPD_SIZE; 7741 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 7742 ha->vpd_size = FA_VPD_SIZE_82XX; 7743 7744 /* Get VPD data into cache */ 7745 ha->vpd = ha->nvram + VPD_OFFSET; 7746 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 7747 ha->vpd_size); 7748 7749 /* Get NVRAM data into cache and calculate checksum. */ 7750 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, 7751 ha->nvram_size); 7752 dptr = (uint32_t *)nv; 7753 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 7754 chksum += le32_to_cpu(*dptr); 7755 7756 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, 7757 "Contents of NVRAM:\n"); 7758 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, 7759 (uint8_t *)nv, ha->nvram_size); 7760 7761 /* Bad NVRAM data, set defaults parameters. */ 7762 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 7763 || nv->id[3] != ' ' || 7764 nv->nvram_version < cpu_to_le16(ICB_VERSION)) { 7765 /* Reset NVRAM data. */ 7766 ql_log(ql_log_info, vha, 0x0073, 7767 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 7768 "version=0x%x.\n", chksum, nv->id[0], 7769 le16_to_cpu(nv->nvram_version)); 7770 ql_log(ql_log_info, vha, 0x0074, 7771 "Falling back to functioning (yet invalid -- WWPN) " 7772 "defaults.\n"); 7773 7774 /* 7775 * Set default initialization control block. 
7776 */ 7777 memset(nv, 0, ha->nvram_size); 7778 nv->nvram_version = cpu_to_le16(ICB_VERSION); 7779 nv->version = cpu_to_le16(ICB_VERSION); 7780 nv->frame_payload_size = 2048; 7781 nv->execution_throttle = cpu_to_le16(0xFFFF); 7782 nv->exchange_count = cpu_to_le16(0); 7783 nv->port_name[0] = 0x21; 7784 nv->port_name[1] = 0x00 + ha->port_no + 1; 7785 nv->port_name[2] = 0x00; 7786 nv->port_name[3] = 0xe0; 7787 nv->port_name[4] = 0x8b; 7788 nv->port_name[5] = 0x1c; 7789 nv->port_name[6] = 0x55; 7790 nv->port_name[7] = 0x86; 7791 nv->node_name[0] = 0x20; 7792 nv->node_name[1] = 0x00; 7793 nv->node_name[2] = 0x00; 7794 nv->node_name[3] = 0xe0; 7795 nv->node_name[4] = 0x8b; 7796 nv->node_name[5] = 0x1c; 7797 nv->node_name[6] = 0x55; 7798 nv->node_name[7] = 0x86; 7799 nv->login_retry_count = cpu_to_le16(8); 7800 nv->interrupt_delay_timer = cpu_to_le16(0); 7801 nv->login_timeout = cpu_to_le16(0); 7802 nv->firmware_options_1 = 7803 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 7804 nv->firmware_options_2 = cpu_to_le32(2 << 4); 7805 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 7806 nv->firmware_options_3 = cpu_to_le32(2 << 13); 7807 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 7808 nv->efi_parameters = cpu_to_le32(0); 7809 nv->reset_delay = 5; 7810 nv->max_luns_per_target = cpu_to_le16(128); 7811 nv->port_down_retry_count = cpu_to_le16(30); 7812 nv->link_down_timeout = cpu_to_le16(180); 7813 nv->enode_mac[0] = 0x00; 7814 nv->enode_mac[1] = 0xC0; 7815 nv->enode_mac[2] = 0xDD; 7816 nv->enode_mac[3] = 0x04; 7817 nv->enode_mac[4] = 0x05; 7818 nv->enode_mac[5] = 0x06 + ha->port_no + 1; 7819 7820 rval = 1; 7821 } 7822 7823 if (IS_T10_PI_CAPABLE(ha)) 7824 nv->frame_payload_size &= ~7; 7825 7826 qlt_81xx_config_nvram_stage1(vha, nv); 7827 7828 /* Reset Initialization control block */ 7829 memset(icb, 0, ha->init_cb_size); 7830 7831 /* Copy 1st segment. 
*/ 7832 dptr1 = (uint8_t *)icb; 7833 dptr2 = (uint8_t *)&nv->version; 7834 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 7835 while (cnt--) 7836 *dptr1++ = *dptr2++; 7837 7838 icb->login_retry_count = nv->login_retry_count; 7839 7840 /* Copy 2nd segment. */ 7841 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 7842 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 7843 cnt = (uint8_t *)&icb->reserved_5 - 7844 (uint8_t *)&icb->interrupt_delay_timer; 7845 while (cnt--) 7846 *dptr1++ = *dptr2++; 7847 7848 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 7849 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 7850 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 7851 icb->enode_mac[0] = 0x00; 7852 icb->enode_mac[1] = 0xC0; 7853 icb->enode_mac[2] = 0xDD; 7854 icb->enode_mac[3] = 0x04; 7855 icb->enode_mac[4] = 0x05; 7856 icb->enode_mac[5] = 0x06 + ha->port_no + 1; 7857 } 7858 7859 /* Use extended-initialization control block. */ 7860 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 7861 7862 /* 7863 * Setup driver NVRAM options. 7864 */ 7865 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 7866 "QLE8XXX"); 7867 7868 qlt_81xx_config_nvram_stage2(vha, icb); 7869 7870 /* Use alternate WWN? */ 7871 if (nv->host_p & cpu_to_le32(BIT_15)) { 7872 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 7873 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 7874 } 7875 7876 /* Prepare nodename */ 7877 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 7878 /* 7879 * Firmware will apply the following mask if the nodename was 7880 * not provided. 7881 */ 7882 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 7883 icb->node_name[0] &= 0xF0; 7884 } 7885 7886 /* Set host adapter parameters. */ 7887 ha->flags.disable_risc_code_load = 0; 7888 ha->flags.enable_lip_reset = 0; 7889 ha->flags.enable_lip_full_login = 7890 le32_to_cpu(nv->host_p) & BIT_10 ? 
1: 0; 7891 ha->flags.enable_target_reset = 7892 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 7893 ha->flags.enable_led_scheme = 0; 7894 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 7895 7896 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 7897 (BIT_6 | BIT_5 | BIT_4)) >> 4; 7898 7899 /* save HBA serial number */ 7900 ha->serial0 = icb->port_name[5]; 7901 ha->serial1 = icb->port_name[6]; 7902 ha->serial2 = icb->port_name[7]; 7903 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 7904 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 7905 7906 icb->execution_throttle = cpu_to_le16(0xFFFF); 7907 7908 ha->retry_count = le16_to_cpu(nv->login_retry_count); 7909 7910 /* Set minimum login_timeout to 4 seconds. */ 7911 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 7912 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 7913 if (le16_to_cpu(nv->login_timeout) < 4) 7914 nv->login_timeout = cpu_to_le16(4); 7915 ha->login_timeout = le16_to_cpu(nv->login_timeout); 7916 7917 /* Set minimum RATOV to 100 tenths of a second. */ 7918 ha->r_a_tov = 100; 7919 7920 ha->loop_reset_delay = nv->reset_delay; 7921 7922 /* Link Down Timeout = 0: 7923 * 7924 * When Port Down timer expires we will start returning 7925 * I/O's to OS with "DID_NO_CONNECT". 7926 * 7927 * Link Down Timeout != 0: 7928 * 7929 * The driver waits for the link to come up after link down 7930 * before returning I/Os to OS with "DID_NO_CONNECT". 7931 */ 7932 if (le16_to_cpu(nv->link_down_timeout) == 0) { 7933 ha->loop_down_abort_time = 7934 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 7935 } else { 7936 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 7937 ha->loop_down_abort_time = 7938 (LOOP_DOWN_TIME - ha->link_down_timeout); 7939 } 7940 7941 /* Need enough time to try and get the port back. 
*/ 7942 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 7943 if (qlport_down_retry) 7944 ha->port_down_retry_count = qlport_down_retry; 7945 7946 /* Set login_retry_count */ 7947 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 7948 if (ha->port_down_retry_count == 7949 le16_to_cpu(nv->port_down_retry_count) && 7950 ha->port_down_retry_count > 3) 7951 ha->login_retry_count = ha->port_down_retry_count; 7952 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 7953 ha->login_retry_count = ha->port_down_retry_count; 7954 if (ql2xloginretrycount) 7955 ha->login_retry_count = ql2xloginretrycount; 7956 7957 /* if not running MSI-X we need handshaking on interrupts */ 7958 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) 7959 icb->firmware_options_2 |= cpu_to_le32(BIT_22); 7960 7961 /* Enable ZIO. */ 7962 if (!vha->flags.init_done) { 7963 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 7964 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 7965 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	/* ZIO mode bits (3:0) are re-programmed explicitly below. */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		icb->firmware_options_3 |= BIT_0;

	if (IS_QLA27XX(ha)) {
		icb->firmware_options_3 |= BIT_8;
		/*
		 * NOTE(review): ql_dbg() is passed ql_log_info (a log level,
		 * not a ql_dbg_* mask) and reuses message id 0x0075 from the
		 * ZIO message above -- looks unintentional, confirm upstream.
		 */
		ql_dbg(ql_log_info, vha, 0x0075,
		    "Enabling direct connection.\n");
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla82xx_restart_isp
 *	Restarts the ISP82xx after a chip reset: re-initializes the
 *	request/response rings, waits for the firmware to become ready,
 *	re-enables interrupts and the FCE/EFT trace buffers, and aborts
 *	(restarts) every virtual port on success.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Return:
 *	0 (QLA_SUCCESS) on success; otherwise the status of the step
 *	that failed.
 *
 * Context:
 *	Kernel context.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the FCE trace buffer if one was allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the EFT trace buffer if one was allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/*
				 * Pin the vport with vref_count and drop the
				 * list lock while aborting it; re-acquire the
				 * lock to continue the walk.
				 */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}

/*
 * qla81xx_update_fw_options
 *	Builds the ISP81xx firmware option words from the module
 *	parameters and the current target/dual mode state, then pushes
 *	them to the firmware via qla2x00_set_fw_options().
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Context:
 *	Kernel context.
 */
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2103,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (qla_tgt_mode_enabled(vha) ||
	    qla_dual_mode_enabled(vha)) {
		/* FW auto send SCSI status during */
		ha->fw_options[1] |= BIT_8;
		/* SAM BUSY status is carried in the upper byte of word 10. */
		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;

		/* FW perform Exchange validation */
		ha->fw_options[2] |= BIT_4;
	} else {
		ha->fw_options[1] &= ~BIT_8;
		ha->fw_options[10] &= 0x00ff;

		ha->fw_options[2] &= ~BIT_4;
	}

	if (ql2xetsenable) {
		/*
		 * Enable ETS Burst.
		 * NOTE(review): the memset() discards every option bit set
		 * above before enabling ETS -- confirm this is intentional.
		 */
		memset(ha->fw_options, 0, sizeof(ha->fw_options));
		ha->fw_options[2] |= BIT_9;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e9,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	-1 (if not found)
 *
 * Context:
 *	 Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	/* Priority table must be loaded and the feature enabled. */
	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return -1;

	priority = -1;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	for (i = 0; i < entries; i++) {
		/*
		 * An entry matches only when BOTH of its PID criteria or
		 * BOTH of its WWN criteria are satisfied, i.e. a counter
		 * reaches 2 below.
		 */
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			/*
			 * INVALID_PORT_ID doubles as the 24-bit port-id
			 * mask and as the wildcard value (all-ones entry
			 * matches any port id).
			 */
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			/* An all-ones WWN in the entry is a wildcard. */
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}

/*
 * qla24xx_update_fcport_fcp_prio
 *	Activates fcp priority for the logged in fc port
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcp = port structure pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int ret;
	int priority;
	uint16_t mb[5];

	/* Only logged-in target ports carry an FCP priority. */
	if (fcport->port_type != FCT_TARGET ||
	    fcport->loop_id == FC_NO_LOOP_ID)
		return QLA_FUNCTION_FAILED;

	priority = qla24xx_get_fcp_prio(vha, fcport);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;

	/* P3P parts only cache the priority; no mailbox command is sent. */
	if (IS_P3P_TYPE(vha->hw)) {
		fcport->fcp_prio = priority & 0xf;
		return QLA_SUCCESS;
	}

	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
	if (ret == QLA_SUCCESS) {
		if (fcport->fcp_prio != priority)
			ql_dbg(ql_dbg_user, vha, 0x709e,
			    "Updated FCP_CMND priority - value=%d loop_id=%d "
			    "port_id=%02x%02x%02x.\n", priority,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		fcport->fcp_prio = priority & 0xf;
	} else
		ql_dbg(ql_dbg_user, vha, 0x704f,
		    "Unable to update FCP_CMND priority - ret=0x%x for "
		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	return ret;
}

/*
 * qla24xx_update_all_fcp_prio
 *	Activates fcp priority for all the logged in ports
 *
 * Input:
 *	ha = adapter block pointer.
8312 * 8313 * Return: 8314 * QLA_SUCCESS or QLA_FUNCTION_FAILED 8315 * 8316 * Context: 8317 * Kernel context. 8318 */ 8319 int 8320 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) 8321 { 8322 int ret; 8323 fc_port_t *fcport; 8324 8325 ret = QLA_FUNCTION_FAILED; 8326 /* We need to set priority for all logged in ports */ 8327 list_for_each_entry(fcport, &vha->vp_fcports, list) 8328 ret = qla24xx_update_fcport_fcp_prio(vha, fcport); 8329 8330 return ret; 8331 } 8332 8333 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, 8334 int vp_idx, bool startqp) 8335 { 8336 int rsp_id = 0; 8337 int req_id = 0; 8338 int i; 8339 struct qla_hw_data *ha = vha->hw; 8340 uint16_t qpair_id = 0; 8341 struct qla_qpair *qpair = NULL; 8342 struct qla_msix_entry *msix; 8343 8344 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { 8345 ql_log(ql_log_warn, vha, 0x00181, 8346 "FW/Driver is not multi-queue capable.\n"); 8347 return NULL; 8348 } 8349 8350 if (ql2xmqsupport || ql2xnvmeenable) { 8351 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 8352 if (qpair == NULL) { 8353 ql_log(ql_log_warn, vha, 0x0182, 8354 "Failed to allocate memory for queue pair.\n"); 8355 return NULL; 8356 } 8357 memset(qpair, 0, sizeof(struct qla_qpair)); 8358 8359 qpair->hw = vha->hw; 8360 qpair->vha = vha; 8361 qpair->qp_lock_ptr = &qpair->qp_lock; 8362 spin_lock_init(&qpair->qp_lock); 8363 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; 8364 8365 /* Assign available que pair id */ 8366 mutex_lock(&ha->mq_lock); 8367 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 8368 if (ha->num_qpairs >= ha->max_qpairs) { 8369 mutex_unlock(&ha->mq_lock); 8370 ql_log(ql_log_warn, vha, 0x0183, 8371 "No resources to create additional q pair.\n"); 8372 goto fail_qid_map; 8373 } 8374 ha->num_qpairs++; 8375 set_bit(qpair_id, ha->qpair_qid_map); 8376 ha->queue_pair_map[qpair_id] = qpair; 8377 qpair->id = qpair_id; 8378 qpair->vp_idx = vp_idx; 8379 qpair->fw_started = ha->flags.fw_started; 8380 INIT_LIST_HEAD(&qpair->hints_list); 8381 INIT_LIST_HEAD(&qpair->nvme_done_list); 8382 qpair->chip_reset = ha->base_qpair->chip_reset; 8383 qpair->enable_class_2 = ha->base_qpair->enable_class_2; 8384 qpair->enable_explicit_conf = 8385 ha->base_qpair->enable_explicit_conf; 8386 8387 for (i = 0; i < ha->msix_count; i++) { 8388 msix = &ha->msix_entries[i]; 8389 if (msix->in_use) 8390 continue; 8391 qpair->msix = msix; 8392 ql_dbg(ql_dbg_multiq, vha, 0xc00f, 8393 "Vector %x selected for qpair\n", msix->vector); 8394 break; 8395 } 8396 if (!qpair->msix) { 8397 ql_log(ql_log_warn, vha, 0x0184, 8398 "Out of MSI-X vectors!.\n"); 8399 goto fail_msix; 8400 } 8401 8402 qpair->msix->in_use = 1; 8403 list_add_tail(&qpair->qp_list_elem, &vha->qp_list); 8404 qpair->pdev = ha->pdev; 8405 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) 8406 qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 8407 8408 mutex_unlock(&ha->mq_lock); 8409 8410 /* Create response queue first */ 8411 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); 8412 if (!rsp_id) { 8413 ql_log(ql_log_warn, vha, 0x0185, 8414 "Failed to create response queue.\n"); 8415 goto fail_rsp; 8416 } 8417 8418 qpair->rsp = ha->rsp_q_map[rsp_id]; 8419 8420 /* Create request queue */ 8421 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, 8422 startqp); 8423 if (!req_id) { 8424 ql_log(ql_log_warn, vha, 0x0186, 8425 "Failed to create request queue.\n"); 8426 
goto fail_req; 8427 } 8428 8429 qpair->req = ha->req_q_map[req_id]; 8430 qpair->rsp->req = qpair->req; 8431 qpair->rsp->qpair = qpair; 8432 /* init qpair to this cpu. Will adjust at run time. */ 8433 qla_cpu_update(qpair, smp_processor_id()); 8434 8435 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 8436 if (ha->fw_attributes & BIT_4) 8437 qpair->difdix_supported = 1; 8438 } 8439 8440 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 8441 if (!qpair->srb_mempool) { 8442 ql_log(ql_log_warn, vha, 0xd036, 8443 "Failed to create srb mempool for qpair %d\n", 8444 qpair->id); 8445 goto fail_mempool; 8446 } 8447 8448 /* Mark as online */ 8449 qpair->online = 1; 8450 8451 if (!vha->flags.qpairs_available) 8452 vha->flags.qpairs_available = 1; 8453 8454 ql_dbg(ql_dbg_multiq, vha, 0xc00d, 8455 "Request/Response queue pair created, id %d\n", 8456 qpair->id); 8457 ql_dbg(ql_dbg_init, vha, 0x0187, 8458 "Request/Response queue pair created, id %d\n", 8459 qpair->id); 8460 } 8461 return qpair; 8462 8463 fail_mempool: 8464 fail_req: 8465 qla25xx_delete_rsp_que(vha, qpair->rsp); 8466 fail_rsp: 8467 mutex_lock(&ha->mq_lock); 8468 qpair->msix->in_use = 0; 8469 list_del(&qpair->qp_list_elem); 8470 if (list_empty(&vha->qp_list)) 8471 vha->flags.qpairs_available = 0; 8472 fail_msix: 8473 ha->queue_pair_map[qpair_id] = NULL; 8474 clear_bit(qpair_id, ha->qpair_qid_map); 8475 ha->num_qpairs--; 8476 mutex_unlock(&ha->mq_lock); 8477 fail_qid_map: 8478 kfree(qpair); 8479 return NULL; 8480 } 8481 8482 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) 8483 { 8484 int ret = QLA_FUNCTION_FAILED; 8485 struct qla_hw_data *ha = qpair->hw; 8486 8487 qpair->delete_in_progress = 1; 8488 while (atomic_read(&qpair->ref_count)) 8489 msleep(500); 8490 8491 ret = qla25xx_delete_req_que(vha, qpair->req); 8492 if (ret != QLA_SUCCESS) 8493 goto fail; 8494 8495 ret = qla25xx_delete_rsp_que(vha, qpair->rsp); 8496 if (ret != QLA_SUCCESS) 8497 goto fail; 8498 8499 
mutex_lock(&ha->mq_lock); 8500 ha->queue_pair_map[qpair->id] = NULL; 8501 clear_bit(qpair->id, ha->qpair_qid_map); 8502 ha->num_qpairs--; 8503 list_del(&qpair->qp_list_elem); 8504 if (list_empty(&vha->qp_list)) { 8505 vha->flags.qpairs_available = 0; 8506 vha->flags.qpairs_req_created = 0; 8507 vha->flags.qpairs_rsp_created = 0; 8508 } 8509 mempool_destroy(qpair->srb_mempool); 8510 kfree(qpair); 8511 mutex_unlock(&ha->mq_lock); 8512 8513 return QLA_SUCCESS; 8514 fail: 8515 return ret; 8516 } 8517