/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

/*
 * qla2x00_sp_timeout - per-SRB timer callback.
 *
 * Fires when an async IOCB (login, logout, mailbox, ...) has not completed
 * within the timeout programmed by qla2x00_init_timer().  Removes the
 * command from the base request queue's outstanding array, invokes the
 * SRB-type-specific timeout handler and releases the SRB.
 *
 * NOTE(review): both iocb->timeout() and sp->free() are invoked while
 * hardware_lock is held; confirm no registered timeout/free handler tries
 * to take hardware_lock itself, and that the done callback cannot also
 * free the SRB (double-free risk) — worth verifying against the IOCB
 * completion paths.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	/* Always queue 0: async IOCBs are started on the base request queue. */
	req = vha->hw->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
	sp->free(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/*
 * qla2x00_sp_free - release an SRB used for an async IOCB.
 *
 * Stops the pending timeout timer and returns the SRB to its pool.
 */
void
qla2x00_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

/*
 * qla2x00_get_async_timeout - compute the timeout (seconds) for async
 * login/logout/mailbox IOCBs.
 *
 * Default is twice the switch-negotiated R_A_TOV (r_a_tov appears to be
 * stored in units of 100 ms, hence the /10 — TODO confirm).  QLAFX00
 * parts use a fixed default; pre-FWI2 ISPs fall back to the login timeout
 * seeded from the initialization control block.
 */
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

/*
 * qla2x00_async_iocb_timeout - SRB-type dispatch for timed-out async IOCBs.
 *
 * Clears FCF_ASYNC_SENT so the port can be retried, then completes the
 * command with a timeout status appropriate to its type.  A timed-out
 * login is reported through the PLOGI-done event path so the normal retry
 * logic applies.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
	    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

	fcport->flags &= ~FCF_ASYNC_SENT;

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		    QLA_LOGIO_LOGIN_RETRIED : 0;
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.sp = sp;
		/* Route through the normal PLOGI-done path so retry logic runs. */
		qla24xx_handle_plogi_done_event(fcport->vha, &ea);
		break;
	case SRB_LOGOUT_CMD:
		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
		break;
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	/* Other SRB types take no timeout action here. */
	}
}

/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI SRB.
 *
 * Clears FCF_ASYNC_SENT and, unless the driver is unloading, forwards the
 * login status/IOP words to the fcport event handler as FCME_PLOGI_DONE.
 * Frees the SRB in all cases.
 */
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla2x00_async_login - issue an asynchronous PLOGI to @fcport.
 *
 * @data[1] carries QLA_LOGIO_LOGIN_RETRIED when this is a retry attempt.
 * Skipped when the HBA is offline or firmware already has a login in
 * progress/complete for this port.  On successful submission the
 * completion path (done/timeout callbacks) owns the SRB and the
 * FCF_ASYNC_SENT flag; on failure both are rolled back here and a relogin
 * is scheduled via RELOGIN_NEEDED.
 *
 * Returns QLA_SUCCESS when the IOCB was started, QLA_FUNCTION_FAILED
 * otherwise.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* +2s of slack over the R_A_TOV-based firmware timeout. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* For NVMe ports the PRLI is issued separately with NVMe params. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla2x00_async_logout_sp_done - completion callback for an async LOGO SRB.
 *
 * Clears FCF_ASYNC_SENT and queues the logout-done work item (unless the
 * driver is unloading), then frees the SRB.
 */
static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

/*
 * qla2x00_async_logout - issue an asynchronous LOGO to @fcport.
 *
 * Returns QLA_SUCCESS when the IOCB was started; on failure the SRB is
 * freed and FCF_ASYNC_SENT cleared.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_logout_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc,
vha, 0x2070, 262 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", 263 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 264 fcport->d_id.b.area, fcport->d_id.b.al_pa, 265 fcport->port_name); 266 return rval; 267 268 done_free_sp: 269 sp->free(sp); 270 done: 271 fcport->flags &= ~FCF_ASYNC_SENT; 272 return rval; 273 } 274 275 static void 276 qla2x00_async_adisc_sp_done(void *ptr, int res) 277 { 278 srb_t *sp = ptr; 279 struct scsi_qla_host *vha = sp->vha; 280 struct srb_iocb *lio = &sp->u.iocb_cmd; 281 282 if (!test_bit(UNLOADING, &vha->dpc_flags)) 283 qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport, 284 lio->u.logio.data); 285 sp->free(sp); 286 } 287 288 int 289 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, 290 uint16_t *data) 291 { 292 srb_t *sp; 293 struct srb_iocb *lio; 294 int rval; 295 296 rval = QLA_FUNCTION_FAILED; 297 fcport->flags |= FCF_ASYNC_SENT; 298 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 299 if (!sp) 300 goto done; 301 302 sp->type = SRB_ADISC_CMD; 303 sp->name = "adisc"; 304 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 305 306 lio = &sp->u.iocb_cmd; 307 lio->timeout = qla2x00_async_iocb_timeout; 308 sp->done = qla2x00_async_adisc_sp_done; 309 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 310 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 311 rval = qla2x00_start_sp(sp); 312 if (rval != QLA_SUCCESS) 313 goto done_free_sp; 314 315 ql_dbg(ql_dbg_disc, vha, 0x206f, 316 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n", 317 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 318 fcport->d_id.b.area, fcport->d_id.b.al_pa); 319 return rval; 320 321 done_free_sp: 322 sp->free(sp); 323 done: 324 fcport->flags &= ~FCF_ASYNC_SENT; 325 return rval; 326 } 327 328 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, 329 struct event_arg *ea) 330 { 331 fc_port_t *fcport, *conflict_fcport; 332 struct get_name_list_extended *e; 333 u16 i, n, found = 0, loop_id; 334 port_id_t id; 335 u64 wwn; 336 u8 
opt = 0, current_login_state; 337 338 fcport = ea->fcport; 339 340 if (ea->rc) { /* rval */ 341 if (fcport->login_retry == 0) { 342 fcport->login_retry = vha->hw->login_retry_count; 343 ql_dbg(ql_dbg_disc, vha, 0x20de, 344 "GNL failed Port login retry %8phN, retry cnt=%d.\n", 345 fcport->port_name, fcport->login_retry); 346 } 347 return; 348 } 349 350 if (fcport->last_rscn_gen != fcport->rscn_gen) { 351 ql_dbg(ql_dbg_disc, vha, 0x20df, 352 "%s %8phC rscn gen changed rscn %d|%d \n", 353 __func__, fcport->port_name, 354 fcport->last_rscn_gen, fcport->rscn_gen); 355 qla24xx_post_gidpn_work(vha, fcport); 356 return; 357 } else if (fcport->last_login_gen != fcport->login_gen) { 358 ql_dbg(ql_dbg_disc, vha, 0x20e0, 359 "%s %8phC login gen changed login %d|%d\n", 360 __func__, fcport->port_name, 361 fcport->last_login_gen, fcport->login_gen); 362 return; 363 } 364 365 n = ea->data[0] / sizeof(struct get_name_list_extended); 366 367 ql_dbg(ql_dbg_disc, vha, 0x20e1, 368 "%s %d %8phC n %d %02x%02x%02x lid %d \n", 369 __func__, __LINE__, fcport->port_name, n, 370 fcport->d_id.b.domain, fcport->d_id.b.area, 371 fcport->d_id.b.al_pa, fcport->loop_id); 372 373 for (i = 0; i < n; i++) { 374 e = &vha->gnl.l[i]; 375 wwn = wwn_to_u64(e->port_name); 376 377 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) 378 continue; 379 380 found = 1; 381 id.b.domain = e->port_id[2]; 382 id.b.area = e->port_id[1]; 383 id.b.al_pa = e->port_id[0]; 384 id.b.rsvd_1 = 0; 385 386 loop_id = le16_to_cpu(e->nport_handle); 387 loop_id = (loop_id & 0x7fff); 388 389 ql_dbg(ql_dbg_disc, vha, 0x20e2, 390 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", 391 __func__, fcport->port_name, 392 e->current_login_state, fcport->fw_login_state, 393 id.b.domain, id.b.area, id.b.al_pa, 394 fcport->d_id.b.domain, fcport->d_id.b.area, 395 fcport->d_id.b.al_pa, loop_id, fcport->loop_id); 396 397 if ((id.b24 != fcport->d_id.b24) || 398 ((fcport->loop_id != FC_NO_LOOP_ID) && 399 (fcport->loop_id != 
loop_id))) { 400 ql_dbg(ql_dbg_disc, vha, 0x20e3, 401 "%s %d %8phC post del sess\n", 402 __func__, __LINE__, fcport->port_name); 403 qlt_schedule_sess_for_deletion(fcport, 1); 404 return; 405 } 406 407 fcport->loop_id = loop_id; 408 409 wwn = wwn_to_u64(fcport->port_name); 410 qlt_find_sess_invalidate_other(vha, wwn, 411 id, loop_id, &conflict_fcport); 412 413 if (conflict_fcport) { 414 /* 415 * Another share fcport share the same loop_id & 416 * nport id. Conflict fcport needs to finish 417 * cleanup before this fcport can proceed to login. 418 */ 419 conflict_fcport->conflict = fcport; 420 fcport->login_pause = 1; 421 } 422 423 if (fcport->fc4f_nvme) 424 current_login_state = e->current_login_state >> 4; 425 else 426 current_login_state = e->current_login_state & 0xf; 427 428 switch (current_login_state) { 429 case DSC_LS_PRLI_COMP: 430 ql_dbg(ql_dbg_disc, vha, 0x20e4, 431 "%s %d %8phC post gpdb\n", 432 __func__, __LINE__, fcport->port_name); 433 opt = PDO_FORCE_ADISC; 434 qla24xx_post_gpdb_work(vha, fcport, opt); 435 break; 436 case DSC_LS_PORT_UNAVAIL: 437 default: 438 if (fcport->loop_id == FC_NO_LOOP_ID) { 439 qla2x00_find_new_loop_id(vha, fcport); 440 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 441 } 442 ql_dbg(ql_dbg_disc, vha, 0x20e5, 443 "%s %d %8phC\n", 444 __func__, __LINE__, fcport->port_name); 445 qla24xx_fcport_handle_login(vha, fcport); 446 break; 447 } 448 } 449 450 if (!found) { 451 /* fw has no record of this port */ 452 if (fcport->loop_id == FC_NO_LOOP_ID) { 453 qla2x00_find_new_loop_id(vha, fcport); 454 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 455 } else { 456 for (i = 0; i < n; i++) { 457 e = &vha->gnl.l[i]; 458 id.b.domain = e->port_id[0]; 459 id.b.area = e->port_id[1]; 460 id.b.al_pa = e->port_id[2]; 461 id.b.rsvd_1 = 0; 462 loop_id = le16_to_cpu(e->nport_handle); 463 464 if (fcport->d_id.b24 == id.b24) { 465 conflict_fcport = 466 qla2x00_find_fcport_by_wwpn(vha, 467 e->port_name, 0); 468 469 ql_dbg(ql_dbg_disc, vha, 0x20e6, 470 "%s 
%d %8phC post del sess\n", 471 __func__, __LINE__, 472 conflict_fcport->port_name); 473 qlt_schedule_sess_for_deletion 474 (conflict_fcport, 1); 475 } 476 477 if (fcport->loop_id == loop_id) { 478 /* FW already picked this loop id for another fcport */ 479 qla2x00_find_new_loop_id(vha, fcport); 480 } 481 } 482 } 483 qla24xx_fcport_handle_login(vha, fcport); 484 } 485 } /* gnl_event */ 486 487 static void 488 qla24xx_async_gnl_sp_done(void *s, int res) 489 { 490 struct srb *sp = s; 491 struct scsi_qla_host *vha = sp->vha; 492 unsigned long flags; 493 struct fc_port *fcport = NULL, *tf; 494 u16 i, n = 0, loop_id; 495 struct event_arg ea; 496 struct get_name_list_extended *e; 497 u64 wwn; 498 struct list_head h; 499 500 ql_dbg(ql_dbg_disc, vha, 0x20e7, 501 "Async done-%s res %x mb[1]=%x mb[2]=%x \n", 502 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], 503 sp->u.iocb_cmd.u.mbx.in_mb[2]); 504 505 memset(&ea, 0, sizeof(ea)); 506 ea.sp = sp; 507 ea.rc = res; 508 ea.event = FCME_GNL_DONE; 509 510 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= 511 sizeof(struct get_name_list_extended)) { 512 n = sp->u.iocb_cmd.u.mbx.in_mb[1] / 513 sizeof(struct get_name_list_extended); 514 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ 515 } 516 517 for (i = 0; i < n; i++) { 518 e = &vha->gnl.l[i]; 519 loop_id = le16_to_cpu(e->nport_handle); 520 /* mask out reserve bit */ 521 loop_id = (loop_id & 0x7fff); 522 set_bit(loop_id, vha->hw->loop_id_map); 523 wwn = wwn_to_u64(e->port_name); 524 525 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, 526 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", 527 __func__, (void *)&wwn, e->port_id[2], e->port_id[1], 528 e->port_id[0], e->current_login_state, e->last_login_state, 529 (loop_id & 0x7fff)); 530 } 531 532 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 533 vha->gnl.sent = 0; 534 535 INIT_LIST_HEAD(&h); 536 fcport = tf = NULL; 537 if (!list_empty(&vha->gnl.fcports)) 538 list_splice_init(&vha->gnl.fcports, &h); 539 540 
	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~FCF_ASYNC_SENT;
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

/*
 * qla24xx_async_gnl - issue a Get Name List (MBC_PORT_NODE_NAME_LIST)
 * mailbox IOCB, shared across ports.
 *
 * The fcport is queued on vha->gnl.fcports; if a GNL is already in flight
 * (vha->gnl.sent) the queued port simply rides along and this returns
 * QLA_SUCCESS without issuing another command.  The response lands in the
 * pre-allocated vha->gnl DMA buffer and is fanned out to all queued ports
 * by qla24xx_async_gnl_sp_done().
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online)
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	/* Snapshot generations so a stale response can be detected. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; piggy-back on its completion. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		rval = QLA_SUCCESS;
		goto done;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;
	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* BIT_2|BIT_3: extended name-list format; DMA to vha->gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla24xx_post_gnl_work - schedule qla24xx_async_gnl() from process
 * context via the driver work queue.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb_sp_done - completion callback for a Get Port
 * Database mailbox IOCB.
 *
 * Parses the returned port database (on success), raises FCME_GPDB_DONE,
 * then releases the DMA-pool buffer and the SRB.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct port_database_24xx *pd;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	int rval = QLA_SUCCESS;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~FCF_ASYNC_SENT;

	if (res) {
		rval = res;
		goto gpd_error_out;
	}

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

gpd_error_out:
	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.rc = rval;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/*
 * qla24xx_post_prli_work - schedule an async PRLI from process context
 * via the driver work queue.
 */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/*
 * qla2x00_async_prli_sp_done - completion callback for an async PRLI SRB.
 *
 * Clears FCF_ASYNC_SENT and forwards the PRLI status/IOP words as an
 * FCME_PRLI_DONE event (unless unloading), then frees the SRB.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

/*
 * qla24xx_async_prli - issue an asynchronous PRLI to @fcport.
 *
 * Skipped when the HBA is offline or firmware already has a login in
 * progress/complete for this port.  For NVMe-capable ports the PRLI
 * carries NVMe service parameters (SRB_LOGIN_NVME_PRLI).
 *
 * Returns QLA_SUCCESS when the IOCB was started; on failure the SRB is
 * freed, FCF_ASYNC_SENT cleared and a relogin is scheduled.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * qla24xx_post_gpdb_work - schedule qla24xx_async_gpdb() from process
 * context via the driver work queue.  @opt is passed through as the
 * Get Port Database option byte (e.g. PDO_FORCE_ADISC).
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_async_gpdb - issue an asynchronous Get Port Database
 * (MBC_GET_PORT_DATABASE) mailbox IOCB for @fcport.
 *
 * The response buffer comes from the s_dma_pool and is owned by (and
 * freed in) qla24xx_async_gpdb_sp_done() once the command is started.
 *
 * Returns QLA_SUCCESS when the IOCB was started; on failure resources
 * are released and the GPDB is re-queued via qla24xx_post_gpdb_work().
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Generation snapshots let the event handler detect staleness. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	/* Could not start now; requeue the GPDB through the work queue. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

/*
 * qla24xx_handle_gpdb_event - digest a completed Get Port Database for
 * @ea->fcport.
 *
 * Discards stale results (login/RSCN generation changed since the command
 * was issued), deletes the session on failure, and on success marks the
 * port logged in, bumps the fcport count once per login, and queues the
 * next step: fcport update directly, or a GPSC query first when the HBA
 * supports iIDMA and the switch supports GPSC.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	int rval = ea->rc;
	fc_port_t *fcport = ea->fcport;
	unsigned long flags;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
	    fcport->disc_state, fcport->fw_login_state, rval);

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
		    __func__, fcport->port_name, fcport->last_rscn_gen,
		    fcport->rscn_gen, fcport->last_login_gen,
		    fcport->login_gen);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived meanwhile; re-resolve the port id first. */
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion_lock(fcport);
		return;
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		/* First successful login for this session. */
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		if (!IS_IIDMA_CAPABLE(vha->hw) ||
		    !vha->hw->flags.gpsc_supported) {
			ql_dbg(ql_dbg_disc, vha, 0x20d6,
			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_upd_fcport_work(vha, fcport);
		} else {
			/* Query port speed (GPSC) before the fcport update. */
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_gpsc_work(vha, fcport);
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
} /* gpdb event */

/*
 * qla24xx_fcport_handle_login - drive the next step of the discovery /
 * login state machine for @fcport.
 *
 * Consumes one login retry and, based on disc_state, issues GNL, PLOGI
 * or GPDB.  Bails out (returning 0 in every case) when retries are
 * exhausted, the port is not FOUND, firmware login is mid-flight, a
 * PLOGI NACK deadline is still running, the host is pure target mode,
 * or an async command is already outstanding.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	if (fcport->login_retry == 0)
		return 0;

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
	    fcport->loop_id);

	fcport->login_retry--;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		/* Hold off until the PLOGI NACK grace period has expired. */
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
			return 0;
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			/* No handle yet: ask firmware for its name list. */
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_async_gnl(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20bf,
			    "%s %d %8phC post login\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			/* Waiting on a conflicting session's cleanup. */
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		if (fcport->flags & FCF_FCP2_DEVICE) {
			/* FCP2 device: verify via ADISC instead of re-login. */
			u8 opt = PDO_FORCE_ADISC;

			ql_dbg(ql_dbg_disc, vha, 0x20c9,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, fcport->port_name);

			fcport->disc_state = DSC_GPDB;
			qla24xx_post_gpdb_work(vha, fcport, opt);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20cf,
			    "%s %d %8phC post login\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}

		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
		break;

	default:
		break;
	}

	return 0;
}

/*
 * qla24xx_handle_rscn_event - react to an RSCN that names @fcport.
 * (Body continues below.)
 */
static
void
qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
	/* Bump the RSCN generation so in-flight commands see staleness. */
	fcport->rscn_gen++;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
	    "%s %8phC DS %d LS %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state);

	if (fcport->flags & FCF_ASYNC_SENT)
		return;

	switch (fcport->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_COMPLETE:
		/* Re-resolve the port id by WWPN (GID_PN). */
		qla24xx_post_gidpn_work(fcport->vha, fcport);
		break;

	default:
		break;
	}
}

/*
 * qla24xx_post_newsess_work - queue creation of a new session for the
 * given N_Port ID / WWPN.  @pla is the pending login ELS context passed
 * through opaquely to the work handler.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, void *pla)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/*
 * qla24xx_handle_delete_done_event - follow-up after a session deletion
 * completed: in initiator/dual mode, re-login the port if it is still
 * FOUND.  No-op in target mode or while unloading.  Always returns 0.
 */
static
int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
		break;

	case MODE_TARGET:
	default:
		/* no-op */
		break;
	}

	return 0;
}

/*
 * qla24xx_handle_relogin_event - process an FCME_RELOGIN request.
 *
 * Defers (and restores the retry it would have consumed) while firmware
 * login is mid-flight, an async command is outstanding, or deletion is
 * pending.  If an RSCN arrived since the last snapshot, re-resolves the
 * port id first; otherwise drives the login state machine.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (fcport->scan_state != QLA_FCPORT_FOUND) {
		fcport->login_retry++;
		return;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
			return;
	}

	if (fcport->flags & FCF_ASYNC_SENT) {
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_async_gidpn(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

/*
 * qla2x00_fcport_event_handler - central dispatcher for fcport discovery
 * events (FCME_*).
 *
 * Discovery-driving events are suppressed while a loop resync is needed
 * or active; RSCN and relogin are additionally suppressed during unload.
 * RSCNs are fanned out per address format: a single port, every port in
 * an affected area/domain, or - for fabric-wide events - a full device
 * rescan via LOOP_RESYNC.  Unknown events are a driver bug (BUG_ON).
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport, *f, *tf;
	uint32_t id = 0, mask, rid;
	int rc;

	switch (ea->event) {
	case FCME_RELOGIN:
	case FCME_RSCN:
	case FCME_GIDPN_DONE:
	case FCME_GPSC_DONE:
	case FCME_GPNID_DONE:
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
			return;
		break;
	default:
		break;
	}

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		/* rsvd_1 carries the RSCN address format qualifier. */
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
			if (!fcport) {
				/* cable moved */
				rc = qla24xx_post_gpnid_work(vha, &ea->id);
				if (rc) {
					ql_log(ql_log_warn, vha, 0xd044,
					    "RSCN GPNID work failed %02x%02x%02x\n",
					    ea->id.b.domain, ea->id.b.area,
					    ea->id.b.al_pa);
				}
			} else {
				ea->fcport = fcport;
				qla24xx_handle_rscn_event(fcport, ea);
			}
			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			/* Notify every known port inside the masked range. */
			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_post_upd_fcport_work(vha, ea->fcport);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_DELETE_DONE:
		qla24xx_handle_delete_done_event(vha, ea);
		break;
	default:
		BUG_ON(1);
		break;
	}
}

/*
 * qla2x00_tmf_iocb_timeout - timeout handler for a task-management IOCB:
 * records CS_TIMEOUT and wakes the waiter in qla2x00_async_tm_cmd().
 */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = CS_TIMEOUT;
	complete(&tmf->u.tmf.comp);
}

/* Completion callback for the TMF srb: wake the waiter. */
static void
qla2x00_tmf_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

/*
 * Issue a task-management IOCB (e.g. LUN reset) and wait synchronously
 * for its completion.  On success (and when not unloading / not an
 * ISPFx00) a Marker IOCB is issued afterwards to resync the firmware.
 * Returns QLA_SUCCESS only when firmware reported CS_COMPLETE.
 *
 * May sleep (GFP_KERNEL allocation + wait_for_completion); must be
 * called from process context.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;
	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	/* Woken by qla2x00_tmf_sp_done() or qla2x00_tmf_iocb_timeout(). */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
		ql_dbg(ql_dbg_taskm, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		/* NOTE(review): lun arrives as uint32_t but is truncated to
		 * 16 bits for the marker - confirm callers never pass LUNs
		 * above 0xffff. */
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ?
		    MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

	/* NOTE(review): qla2x00_sp_timeout() also calls sp->free() after
	 * invoking iocb->timeout(), which completes the waiter above - on
	 * the timeout path this looks like a possible double free of sp;
	 * verify against qla2x00_sp_timeout(). */
done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/* Timeout handler for an SRB_ABT_CMD srb: mark CS_TIMEOUT and wake the
 * waiter in qla24xx_async_abort_cmd(). */
static void
qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = CS_TIMEOUT;
	complete(&abt->u.abt.comp);
}

/* Completion callback for the abort srb: wake the waiter. */
static void
qla24xx_abort_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}

/*
 * Issue an Abort IOCB for @cmd_sp and wait synchronously for the
 * result.  Returns QLA_SUCCESS only when firmware reported
 * CS_COMPLETE.  Sleeps; process context only.
 */
int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qla24xx_abort_sp_done;
	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * Abort an outstanding command.  First verifies @sp is still in the
 * request queue's outstanding array (under hardware_lock); ISPFx00
 * pass-through commands are aborted via the FX disc interface, all
 * others via an Abort IOCB.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	/* Handle 0 is reserved; scan for the slot holding this sp. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp);
}

/*
 * PRLI completed.  On success, fetch the port database to finish
 * login; on failure for an N2N (point-to-point) peer, retry PRLI with
 * the FC-4 NVMe bit cleared so the FCP PRLI can be attempted.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if (ea->fcport->n2n_flag) {
			/* NVMe PRLI failed on the N2N peer - fall back to
			 * an FCP PRLI. */
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post fc4 prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			ea->fcport->n2n_flag = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
		}
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}

/*
 * PLOGI completed.  Dispatch on the mailbox status: continue with PRLI
 * (NVMe) or GPDB (FCP) on success; schedule a relogin or mark the
 * device lost on error; on loop-ID/port-ID collisions free the
 * conflicting ID and re-run GNL/GID_PN to pick a fresh one.
 */
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		if (IS_SW_RESV_ADDR(cid)) {
			/* Conflict is a reserved switch address - just park
			 * our loop ID and let GNL assign a new one. */
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		qla2x00_clear_loop_id(ea->fcport);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		break;
	}
	return;
}

/*
 * Async LOGO finished: mark the device lost, notify the target-mode
 * layer, and bump the login generation so stale login work is ignored.
 */
void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
	fcport->login_gen++;
	return;
}

/*
 * Async ADISC finished: refresh the fcport on success, otherwise clear
 * FCF_ASYNC_SENT and either schedule a relogin or mark the device lost
 * depending on whether firmware retried the login.
 */
void
qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	if (data[0] == MBS_COMMAND_COMPLETE) {
		qla2x00_update_fcport(vha, fcport);

		return;
	}

	/* Retry login. */
	fcport->flags &= ~FCF_ASYNC_SENT;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	else
		qla2x00_mark_device_lost(vha, fcport, 1, 0);

	return;
}

/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions.
 */
/****************************************************************************/

/*
 * Load the NIC Core firmware on ISP83xx parts via the Inter-Driver
 * Communication (IDC) protocol: register this function's presence,
 * negotiate IDC major/minor versions, optionally transition the device
 * to READY as reset owner, then run the IDC state handler.  Runs under
 * the IDC lock.  Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version (2 bits/port). */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
 * qla2x00_initialize_adapter
 *      Initialize board: reset per-host state, configure PCI space,
 *      reset the chip, validate flash/NVRAM, load and verify RISC
 *      firmware, and bring up chip-family specific extras (ISP84xx
 *      chip init, ISP8031 NIC core firmware, FCP priority config).
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Only load firmware when it is not already resident and valid. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	/* NOTE(review): comment says 2048 but the call requests 4096 -
	 * confirm which value is intended. */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	/* NOTE(review): comment says 2048 but the call requests 4096 -
	 * confirm which value is intended. */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code.
 */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @ha: HA context
 *
 * Pauses the RISC, soft-resets the FPM and frame-buffer modules (on
 * non-2100 parts), soft-resets the ISP, then releases the RISC.  PCI
 * bus mastering is disabled around the reset and re-enabled at the
 * end.  Runs entirely under hardware_lock.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
 *
 * No-op (returns QLA_SUCCESS) on non-ISP81xx parts.
 *
 * Returns 0 on success.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC.
 */
	/* Shut down DMA first and wait for it to go idle. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	/* Record milestone for a later firmware-dump capability report. */
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for RISC to become ready (mailbox0 == 0) after release. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}

/* Read the RISC semaphore register through the iobase window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);

}

/* Write the RISC semaphore register through the iobase window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}

/*
 * Acquire the RISC semaphore before reset on specific subsystem IDs
 * (0x0175/0x0240 only; a no-op otherwise).  Retries the set/clear
 * handshake until acquired or TIMEOUT_TOTAL_ELAPSED is exceeded, in
 * which case the semaphore is force-set.  Sleeps via msleep().
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Another agent force-held it; clear and wait for release. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
elapsed_msec += delta_msec; 2349 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) 2350 goto force; 2351 } 2352 2353 if (wd32 & RISC_SEMAPHORE_FORCE) 2354 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); 2355 2356 goto attempt; 2357 2358 force: 2359 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); 2360 2361 acquired: 2362 return; 2363 } 2364 2365 /** 2366 * qla24xx_reset_chip() - Reset ISP24xx chip. 2367 * @ha: HA context 2368 * 2369 * Returns 0 on success. 2370 */ 2371 void 2372 qla24xx_reset_chip(scsi_qla_host_t *vha) 2373 { 2374 struct qla_hw_data *ha = vha->hw; 2375 2376 if (pci_channel_offline(ha->pdev) && 2377 ha->flags.pci_channel_io_perm_failure) { 2378 return; 2379 } 2380 2381 ha->isp_ops->disable_intrs(ha); 2382 2383 qla25xx_manipulate_risc_semaphore(vha); 2384 2385 /* Perform RISC reset. */ 2386 qla24xx_reset_risc(vha); 2387 } 2388 2389 /** 2390 * qla2x00_chip_diag() - Test chip for proper operation. 2391 * @ha: HA context 2392 * 2393 * Returns 0 on success. 2394 */ 2395 int 2396 qla2x00_chip_diag(scsi_qla_host_t *vha) 2397 { 2398 int rval; 2399 struct qla_hw_data *ha = vha->hw; 2400 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2401 unsigned long flags = 0; 2402 uint16_t data; 2403 uint32_t cnt; 2404 uint16_t mb[5]; 2405 struct req_que *req = ha->req_q_map[0]; 2406 2407 /* Assume a failed state */ 2408 rval = QLA_FUNCTION_FAILED; 2409 2410 ql_dbg(ql_dbg_init, vha, 0x007b, 2411 "Testing device at %lx.\n", (u_long)®->flash_address); 2412 2413 spin_lock_irqsave(&ha->hardware_lock, flags); 2414 2415 /* Reset ISP chip. */ 2416 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 2417 2418 /* 2419 * We need to have a delay here since the card will not respond while 2420 * in reset causing an MCA on some architectures. 
2421 */ 2422 udelay(20); 2423 data = qla2x00_debounce_register(®->ctrl_status); 2424 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { 2425 udelay(5); 2426 data = RD_REG_WORD(®->ctrl_status); 2427 barrier(); 2428 } 2429 2430 if (!cnt) 2431 goto chip_diag_failed; 2432 2433 ql_dbg(ql_dbg_init, vha, 0x007c, 2434 "Reset register cleared by chip reset.\n"); 2435 2436 /* Reset RISC processor. */ 2437 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 2438 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 2439 2440 /* Workaround for QLA2312 PCI parity error */ 2441 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 2442 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); 2443 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { 2444 udelay(5); 2445 data = RD_MAILBOX_REG(ha, reg, 0); 2446 barrier(); 2447 } 2448 } else 2449 udelay(10); 2450 2451 if (!cnt) 2452 goto chip_diag_failed; 2453 2454 /* Check product ID of chip */ 2455 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); 2456 2457 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 2458 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 2459 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 2460 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 2461 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 2462 mb[3] != PROD_ID_3) { 2463 ql_log(ql_log_warn, vha, 0x0062, 2464 "Wrong product ID = 0x%x,0x%x,0x%x.\n", 2465 mb[1], mb[2], mb[3]); 2466 2467 goto chip_diag_failed; 2468 } 2469 ha->product_id[0] = mb[1]; 2470 ha->product_id[1] = mb[2]; 2471 ha->product_id[2] = mb[3]; 2472 ha->product_id[3] = mb[4]; 2473 2474 /* Adjust fw RISC transfer size */ 2475 if (req->length > 1024) 2476 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 2477 else 2478 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 2479 req->length; 2480 2481 if (IS_QLA2200(ha) && 2482 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 2483 /* Limit firmware transfer size with a 2200A */ 2484 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A 
Chip.\n"); 2485 2486 ha->device_type |= DT_ISP2200A; 2487 ha->fw_transfer_size = 128; 2488 } 2489 2490 /* Wrap Incoming Mailboxes Test. */ 2491 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2492 2493 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); 2494 rval = qla2x00_mbx_reg_test(vha); 2495 if (rval) 2496 ql_log(ql_log_warn, vha, 0x0080, 2497 "Failed mailbox send register test.\n"); 2498 else 2499 /* Flag a successful rval */ 2500 rval = QLA_SUCCESS; 2501 spin_lock_irqsave(&ha->hardware_lock, flags); 2502 2503 chip_diag_failed: 2504 if (rval) 2505 ql_log(ql_log_info, vha, 0x0081, 2506 "Chip diagnostics **** FAILED ****.\n"); 2507 2508 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2509 2510 return (rval); 2511 } 2512 2513 /** 2514 * qla24xx_chip_diag() - Test ISP24xx for proper operation. 2515 * @ha: HA context 2516 * 2517 * Returns 0 on success. 2518 */ 2519 int 2520 qla24xx_chip_diag(scsi_qla_host_t *vha) 2521 { 2522 int rval; 2523 struct qla_hw_data *ha = vha->hw; 2524 struct req_que *req = ha->req_q_map[0]; 2525 2526 if (IS_P3P_TYPE(ha)) 2527 return QLA_SUCCESS; 2528 2529 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 2530 2531 rval = qla2x00_mbx_reg_test(vha); 2532 if (rval) { 2533 ql_log(ql_log_warn, vha, 0x0082, 2534 "Failed mailbox send register test.\n"); 2535 } else { 2536 /* Flag a successful rval */ 2537 rval = QLA_SUCCESS; 2538 } 2539 2540 return rval; 2541 } 2542 2543 void 2544 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) 2545 { 2546 int rval; 2547 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 2548 eft_size, fce_size, mq_size; 2549 dma_addr_t tc_dma; 2550 void *tc; 2551 struct qla_hw_data *ha = vha->hw; 2552 struct req_que *req = ha->req_q_map[0]; 2553 struct rsp_que *rsp = ha->rsp_q_map[0]; 2554 2555 if (ha->fw_dump) { 2556 ql_dbg(ql_dbg_init, vha, 0x00bd, 2557 "Firmware dump already allocated.\n"); 2558 return; 2559 } 2560 2561 ha->fw_dumped = 0; 2562 ha->fw_dump_cap_flags = 0; 2563 dump_size = 
fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 2564 req_q_size = rsp_q_size = 0; 2565 2566 if (IS_QLA27XX(ha)) 2567 goto try_fce; 2568 2569 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2570 fixed_size = sizeof(struct qla2100_fw_dump); 2571 } else if (IS_QLA23XX(ha)) { 2572 fixed_size = offsetof(struct qla2300_fw_dump, data_ram); 2573 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 2574 sizeof(uint16_t); 2575 } else if (IS_FWI2_CAPABLE(ha)) { 2576 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 2577 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 2578 else if (IS_QLA81XX(ha)) 2579 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 2580 else if (IS_QLA25XX(ha)) 2581 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); 2582 else 2583 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); 2584 2585 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 2586 sizeof(uint32_t); 2587 if (ha->mqenable) { 2588 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 2589 mq_size = sizeof(struct qla2xxx_mq_chain); 2590 /* 2591 * Allocate maximum buffer size for all queues. 2592 * Resizing must be done at end-of-dump processing. 2593 */ 2594 mq_size += ha->max_req_queues * 2595 (req->length * sizeof(request_t)); 2596 mq_size += ha->max_rsp_queues * 2597 (rsp->length * sizeof(response_t)); 2598 } 2599 if (ha->tgt.atio_ring) 2600 mq_size += ha->tgt.atio_q_length * sizeof(request_t); 2601 /* Allocate memory for Fibre Channel Event Buffer. */ 2602 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2603 !IS_QLA27XX(ha)) 2604 goto try_eft; 2605 2606 try_fce: 2607 if (ha->fce) 2608 dma_free_coherent(&ha->pdev->dev, 2609 FCE_SIZE, ha->fce, ha->fce_dma); 2610 2611 /* Allocate memory for Fibre Channel Event Buffer. 
*/ 2612 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 2613 GFP_KERNEL); 2614 if (!tc) { 2615 ql_log(ql_log_warn, vha, 0x00be, 2616 "Unable to allocate (%d KB) for FCE.\n", 2617 FCE_SIZE / 1024); 2618 goto try_eft; 2619 } 2620 2621 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 2622 ha->fce_mb, &ha->fce_bufs); 2623 if (rval) { 2624 ql_log(ql_log_warn, vha, 0x00bf, 2625 "Unable to initialize FCE (%d).\n", rval); 2626 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 2627 tc_dma); 2628 ha->flags.fce_enabled = 0; 2629 goto try_eft; 2630 } 2631 ql_dbg(ql_dbg_init, vha, 0x00c0, 2632 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); 2633 2634 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 2635 ha->flags.fce_enabled = 1; 2636 ha->fce_dma = tc_dma; 2637 ha->fce = tc; 2638 2639 try_eft: 2640 if (ha->eft) 2641 dma_free_coherent(&ha->pdev->dev, 2642 EFT_SIZE, ha->eft, ha->eft_dma); 2643 2644 /* Allocate memory for Extended Trace Buffer. */ 2645 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 2646 GFP_KERNEL); 2647 if (!tc) { 2648 ql_log(ql_log_warn, vha, 0x00c1, 2649 "Unable to allocate (%d KB) for EFT.\n", 2650 EFT_SIZE / 1024); 2651 goto cont_alloc; 2652 } 2653 2654 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 2655 if (rval) { 2656 ql_log(ql_log_warn, vha, 0x00c2, 2657 "Unable to initialize EFT (%d).\n", rval); 2658 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 2659 tc_dma); 2660 goto cont_alloc; 2661 } 2662 ql_dbg(ql_dbg_init, vha, 0x00c3, 2663 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); 2664 2665 eft_size = EFT_SIZE; 2666 ha->eft_dma = tc_dma; 2667 ha->eft = tc; 2668 } 2669 2670 cont_alloc: 2671 if (IS_QLA27XX(ha)) { 2672 if (!ha->fw_dump_template) { 2673 ql_log(ql_log_warn, vha, 0x00ba, 2674 "Failed missing fwdump template\n"); 2675 return; 2676 } 2677 dump_size = qla27xx_fwdt_calculate_dump_size(vha); 2678 ql_dbg(ql_dbg_init, vha, 0x00fa, 2679 "-> allocating fwdump (%x bytes)...\n", dump_size); 
2680 goto allocate; 2681 } 2682 2683 req_q_size = req->length * sizeof(request_t); 2684 rsp_q_size = rsp->length * sizeof(response_t); 2685 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 2686 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; 2687 ha->chain_offset = dump_size; 2688 dump_size += mq_size + fce_size; 2689 2690 if (ha->exchoffld_buf) 2691 dump_size += sizeof(struct qla2xxx_offld_chain) + 2692 ha->exchoffld_size; 2693 if (ha->exlogin_buf) 2694 dump_size += sizeof(struct qla2xxx_offld_chain) + 2695 ha->exlogin_size; 2696 2697 allocate: 2698 ha->fw_dump = vmalloc(dump_size); 2699 if (!ha->fw_dump) { 2700 ql_log(ql_log_warn, vha, 0x00c4, 2701 "Unable to allocate (%d KB) for firmware dump.\n", 2702 dump_size / 1024); 2703 2704 if (ha->fce) { 2705 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2706 ha->fce_dma); 2707 ha->fce = NULL; 2708 ha->fce_dma = 0; 2709 } 2710 2711 if (ha->eft) { 2712 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, 2713 ha->eft_dma); 2714 ha->eft = NULL; 2715 ha->eft_dma = 0; 2716 } 2717 return; 2718 } 2719 ha->fw_dump_len = dump_size; 2720 ql_dbg(ql_dbg_init, vha, 0x00c5, 2721 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); 2722 2723 if (IS_QLA27XX(ha)) 2724 return; 2725 2726 ha->fw_dump->signature[0] = 'Q'; 2727 ha->fw_dump->signature[1] = 'L'; 2728 ha->fw_dump->signature[2] = 'G'; 2729 ha->fw_dump->signature[3] = 'C'; 2730 ha->fw_dump->version = htonl(1); 2731 2732 ha->fw_dump->fixed_size = htonl(fixed_size); 2733 ha->fw_dump->mem_size = htonl(mem_size); 2734 ha->fw_dump->req_q_size = htonl(req_q_size); 2735 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); 2736 2737 ha->fw_dump->eft_size = htonl(eft_size); 2738 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma)); 2739 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma)); 2740 2741 ha->fw_dump->header_size = 2742 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 2743 } 2744 2745 static int 2746 qla81xx_mpi_sync(scsi_qla_host_t *vha) 2747 { 2748 
#define MPS_MASK 0xe0 2749 int rval; 2750 uint16_t dc; 2751 uint32_t dw; 2752 2753 if (!IS_QLA81XX(vha->hw)) 2754 return QLA_SUCCESS; 2755 2756 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 2757 if (rval != QLA_SUCCESS) { 2758 ql_log(ql_log_warn, vha, 0x0105, 2759 "Unable to acquire semaphore.\n"); 2760 goto done; 2761 } 2762 2763 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 2764 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 2765 if (rval != QLA_SUCCESS) { 2766 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n"); 2767 goto done_release; 2768 } 2769 2770 dc &= MPS_MASK; 2771 if (dc == (dw & MPS_MASK)) 2772 goto done_release; 2773 2774 dw &= ~MPS_MASK; 2775 dw |= dc; 2776 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 2777 if (rval != QLA_SUCCESS) { 2778 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n"); 2779 } 2780 2781 done_release: 2782 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 2783 if (rval != QLA_SUCCESS) { 2784 ql_log(ql_log_warn, vha, 0x006d, 2785 "Unable to release semaphore.\n"); 2786 } 2787 2788 done: 2789 return rval; 2790 } 2791 2792 int 2793 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) 2794 { 2795 /* Don't try to reallocate the array */ 2796 if (req->outstanding_cmds) 2797 return QLA_SUCCESS; 2798 2799 if (!IS_FWI2_CAPABLE(ha)) 2800 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; 2801 else { 2802 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 2803 req->num_outstanding_cmds = ha->cur_fw_xcb_count; 2804 else 2805 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 2806 } 2807 2808 req->outstanding_cmds = kzalloc(sizeof(srb_t *) * 2809 req->num_outstanding_cmds, GFP_KERNEL); 2810 2811 if (!req->outstanding_cmds) { 2812 /* 2813 * Try to allocate a minimal size just so we can get through 2814 * initialization. 
2815 */ 2816 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 2817 req->outstanding_cmds = kzalloc(sizeof(srb_t *) * 2818 req->num_outstanding_cmds, GFP_KERNEL); 2819 2820 if (!req->outstanding_cmds) { 2821 ql_log(ql_log_fatal, NULL, 0x0126, 2822 "Failed to allocate memory for " 2823 "outstanding_cmds for req_que %p.\n", req); 2824 req->num_outstanding_cmds = 0; 2825 return QLA_FUNCTION_FAILED; 2826 } 2827 } 2828 2829 return QLA_SUCCESS; 2830 } 2831 2832 #define PRINT_FIELD(_field, _flag, _str) { \ 2833 if (a0->_field & _flag) {\ 2834 if (p) {\ 2835 strcat(ptr, "|");\ 2836 ptr++;\ 2837 leftover--;\ 2838 } \ 2839 len = snprintf(ptr, leftover, "%s", _str); \ 2840 p = 1;\ 2841 leftover -= len;\ 2842 ptr += len; \ 2843 } \ 2844 } 2845 2846 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) 2847 { 2848 #define STR_LEN 64 2849 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 2850 u8 str[STR_LEN], *ptr, p; 2851 int leftover, len; 2852 2853 memset(str, 0, STR_LEN); 2854 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); 2855 ql_dbg(ql_dbg_init, vha, 0x015a, 2856 "SFP MFG Name: %s\n", str); 2857 2858 memset(str, 0, STR_LEN); 2859 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); 2860 ql_dbg(ql_dbg_init, vha, 0x015c, 2861 "SFP Part Name: %s\n", str); 2862 2863 /* media */ 2864 memset(str, 0, STR_LEN); 2865 ptr = str; 2866 leftover = STR_LEN; 2867 p = len = 0; 2868 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); 2869 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); 2870 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); 2871 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); 2872 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); 2873 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); 2874 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); 2875 ql_dbg(ql_dbg_init, vha, 0x0160, 2876 "SFP Media: %s\n", str); 2877 2878 /* link length */ 2879 memset(str, 0, STR_LEN); 2880 ptr = str; 2881 leftover = STR_LEN; 2882 p = len = 0; 2883 
PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); 2884 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); 2885 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); 2886 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); 2887 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); 2888 ql_dbg(ql_dbg_init, vha, 0x0196, 2889 "SFP Link Length: %s\n", str); 2890 2891 memset(str, 0, STR_LEN); 2892 ptr = str; 2893 leftover = STR_LEN; 2894 p = len = 0; 2895 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); 2896 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); 2897 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); 2898 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); 2899 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); 2900 ql_dbg(ql_dbg_init, vha, 0x016e, 2901 "SFP FC Link Tech: %s\n", str); 2902 2903 if (a0->length_km) 2904 ql_dbg(ql_dbg_init, vha, 0x016f, 2905 "SFP Distant: %d km\n", a0->length_km); 2906 if (a0->length_100m) 2907 ql_dbg(ql_dbg_init, vha, 0x0170, 2908 "SFP Distant: %d m\n", a0->length_100m*100); 2909 if (a0->length_50um_10m) 2910 ql_dbg(ql_dbg_init, vha, 0x0189, 2911 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); 2912 if (a0->length_62um_10m) 2913 ql_dbg(ql_dbg_init, vha, 0x018a, 2914 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); 2915 if (a0->length_om4_10m) 2916 ql_dbg(ql_dbg_init, vha, 0x0194, 2917 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); 2918 if (a0->length_om3_10m) 2919 ql_dbg(ql_dbg_init, vha, 0x0195, 2920 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); 2921 } 2922 2923 2924 /* 2925 * Return Code: 2926 * QLA_SUCCESS: no action 2927 * QLA_INTERFACE_ERROR: SFP is not there. 
 *	QLA_FUNCTION_FAILED: detected New SFP
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc = QLA_SUCCESS;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;

	if (!AUTO_DETECT_SFP_SUPPORT(vha))
		goto out;

	/* Refresh ha->sfp_data from the transceiver EEPROM. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
		/* long range */
		ha->flags.detected_lr_sfp = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->long_range_distance = LR_DISTANCE_10K;
		else
			ha->long_range_distance = LR_DISTANCE_5K;

		/* Only log when the detection differs from current setting. */
		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x507b,
			    "Detected Long Range SFP.\n");
	} else {
		/* short range */
		ha->flags.detected_lr_sfp = 0;
		if (ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x5084,
			    "Detected Short Range SFP.\n");
	}

	/* During initial init the "new SFP" return code is suppressed. */
	if (!vha->flags.init_done)
		rc = QLA_SUCCESS;
out:
	return rc;
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Loads the RISC firmware, verifies its checksum, starts it, then
 * retrieves firmware information and sizes firmware-dependent resources
 * (NPIV vports, outstanding-command array, optional fw dump).  On P3P
 * (82xx) parts control jumps straight to enable_82xx_npiv after load.
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);	/* PCI posting. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Vport count must leave (n+1) a
					 * multiple of MIN_MULTI_ID_FABRIC. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				/* Only on first bring-up (fw version was 0). */
				if (!fw_major_version && ql2xallocfwdump
				    && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);
			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);	/* PCI posting. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* Missing FAC is not fatal on 83xx/27xx. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue to initialize
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
3133 */ 3134 void 3135 qla2x00_init_response_q_entries(struct rsp_que *rsp) 3136 { 3137 uint16_t cnt; 3138 response_t *pkt; 3139 3140 rsp->ring_ptr = rsp->ring; 3141 rsp->ring_index = 0; 3142 rsp->status_srb = NULL; 3143 pkt = rsp->ring_ptr; 3144 for (cnt = 0; cnt < rsp->length; cnt++) { 3145 pkt->signature = RESPONSE_PROCESSED; 3146 pkt++; 3147 } 3148 } 3149 3150 /** 3151 * qla2x00_update_fw_options() - Read and process firmware options. 3152 * @ha: HA context 3153 * 3154 * Returns 0 on success. 3155 */ 3156 void 3157 qla2x00_update_fw_options(scsi_qla_host_t *vha) 3158 { 3159 uint16_t swing, emphasis, tx_sens, rx_sens; 3160 struct qla_hw_data *ha = vha->hw; 3161 3162 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 3163 qla2x00_get_fw_options(vha, ha->fw_options); 3164 3165 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 3166 return; 3167 3168 /* Serial Link options. */ 3169 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115, 3170 "Serial link options.\n"); 3171 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109, 3172 (uint8_t *)&ha->fw_seriallink_options, 3173 sizeof(ha->fw_seriallink_options)); 3174 3175 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 3176 if (ha->fw_seriallink_options[3] & BIT_2) { 3177 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; 3178 3179 /* 1G settings */ 3180 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); 3181 emphasis = (ha->fw_seriallink_options[2] & 3182 (BIT_4 | BIT_3)) >> 3; 3183 tx_sens = ha->fw_seriallink_options[0] & 3184 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3185 rx_sens = (ha->fw_seriallink_options[0] & 3186 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; 3187 ha->fw_options[10] = (emphasis << 14) | (swing << 8); 3188 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 3189 if (rx_sens == 0x0) 3190 rx_sens = 0x3; 3191 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; 3192 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) 3193 ha->fw_options[10] |= BIT_5 | 3194 ((rx_sens & (BIT_1 | BIT_0)) << 2) | 3195 (tx_sens & (BIT_1 | BIT_0)); 3196 
3197 /* 2G settings */ 3198 swing = (ha->fw_seriallink_options[2] & 3199 (BIT_7 | BIT_6 | BIT_5)) >> 5; 3200 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); 3201 tx_sens = ha->fw_seriallink_options[1] & 3202 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3203 rx_sens = (ha->fw_seriallink_options[1] & 3204 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; 3205 ha->fw_options[11] = (emphasis << 14) | (swing << 8); 3206 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 3207 if (rx_sens == 0x0) 3208 rx_sens = 0x3; 3209 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; 3210 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) 3211 ha->fw_options[11] |= BIT_5 | 3212 ((rx_sens & (BIT_1 | BIT_0)) << 2) | 3213 (tx_sens & (BIT_1 | BIT_0)); 3214 } 3215 3216 /* FCP2 options. */ 3217 /* Return command IOCBs without waiting for an ABTS to complete. */ 3218 ha->fw_options[3] |= BIT_13; 3219 3220 /* LED scheme. */ 3221 if (ha->flags.enable_led_scheme) 3222 ha->fw_options[2] |= BIT_12; 3223 3224 /* Detect ISP6312. */ 3225 if (IS_QLA6312(ha)) 3226 ha->fw_options[2] |= BIT_13; 3227 3228 /* Set Retry FLOGI in case of P2P connection */ 3229 if (ha->operating_mode == P2P) { 3230 ha->fw_options[2] |= BIT_3; 3231 ql_dbg(ql_dbg_disc, vha, 0x2100, 3232 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", 3233 __func__, ha->fw_options[2]); 3234 } 3235 3236 /* Update firmware options. */ 3237 qla2x00_set_fw_options(vha, ha->fw_options); 3238 } 3239 3240 void 3241 qla24xx_update_fw_options(scsi_qla_host_t *vha) 3242 { 3243 int rval; 3244 struct qla_hw_data *ha = vha->hw; 3245 3246 if (IS_P3P_TYPE(ha)) 3247 return; 3248 3249 /* Hold status IOCBs until ABTS response received. 
*/ 3250 if (ql2xfwholdabts) 3251 ha->fw_options[3] |= BIT_12; 3252 3253 /* Set Retry FLOGI in case of P2P connection */ 3254 if (ha->operating_mode == P2P) { 3255 ha->fw_options[2] |= BIT_3; 3256 ql_dbg(ql_dbg_disc, vha, 0x2101, 3257 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", 3258 __func__, ha->fw_options[2]); 3259 } 3260 3261 /* Move PUREX, ABTS RX & RIDA to ATIOQ */ 3262 if (ql2xmvasynctoatio && 3263 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) { 3264 if (qla_tgt_mode_enabled(vha) || 3265 qla_dual_mode_enabled(vha)) 3266 ha->fw_options[2] |= BIT_11; 3267 else 3268 ha->fw_options[2] &= ~BIT_11; 3269 } 3270 3271 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3272 /* 3273 * Tell FW to track each exchange to prevent 3274 * driver from using stale exchange. 3275 */ 3276 if (qla_tgt_mode_enabled(vha) || 3277 qla_dual_mode_enabled(vha)) 3278 ha->fw_options[2] |= BIT_4; 3279 else 3280 ha->fw_options[2] &= ~BIT_4; 3281 } 3282 3283 ql_dbg(ql_dbg_init, vha, 0x00e8, 3284 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", 3285 __func__, ha->fw_options[1], ha->fw_options[2], 3286 ha->fw_options[3], vha->host->active_mode); 3287 3288 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) 3289 qla2x00_set_fw_options(vha, ha->fw_options); 3290 3291 /* Update Serial Link options. 
*/ 3292 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 3293 return; 3294 3295 rval = qla2x00_set_serdes_params(vha, 3296 le16_to_cpu(ha->fw_seriallink_options24[1]), 3297 le16_to_cpu(ha->fw_seriallink_options24[2]), 3298 le16_to_cpu(ha->fw_seriallink_options24[3])); 3299 if (rval != QLA_SUCCESS) { 3300 ql_log(ql_log_warn, vha, 0x0104, 3301 "Unable to update Serial Link options (%x).\n", rval); 3302 } 3303 } 3304 3305 void 3306 qla2x00_config_rings(struct scsi_qla_host *vha) 3307 { 3308 struct qla_hw_data *ha = vha->hw; 3309 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3310 struct req_que *req = ha->req_q_map[0]; 3311 struct rsp_que *rsp = ha->rsp_q_map[0]; 3312 3313 /* Setup ring parameters in initialization control block. */ 3314 ha->init_cb->request_q_outpointer = cpu_to_le16(0); 3315 ha->init_cb->response_q_inpointer = cpu_to_le16(0); 3316 ha->init_cb->request_q_length = cpu_to_le16(req->length); 3317 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); 3318 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 3319 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); 3320 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 3321 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 3322 3323 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 3324 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 3325 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); 3326 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); 3327 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. 
*/ 3328 } 3329 3330 void 3331 qla24xx_config_rings(struct scsi_qla_host *vha) 3332 { 3333 struct qla_hw_data *ha = vha->hw; 3334 device_reg_t *reg = ISP_QUE_REG(ha, 0); 3335 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 3336 struct qla_msix_entry *msix; 3337 struct init_cb_24xx *icb; 3338 uint16_t rid = 0; 3339 struct req_que *req = ha->req_q_map[0]; 3340 struct rsp_que *rsp = ha->rsp_q_map[0]; 3341 3342 /* Setup ring parameters in initialization control block. */ 3343 icb = (struct init_cb_24xx *)ha->init_cb; 3344 icb->request_q_outpointer = cpu_to_le16(0); 3345 icb->response_q_inpointer = cpu_to_le16(0); 3346 icb->request_q_length = cpu_to_le16(req->length); 3347 icb->response_q_length = cpu_to_le16(rsp->length); 3348 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 3349 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); 3350 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 3351 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 3352 3353 /* Setup ATIO queue dma pointers for target mode */ 3354 icb->atio_q_inpointer = cpu_to_le16(0); 3355 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); 3356 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); 3357 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); 3358 3359 if (IS_SHADOW_REG_CAPABLE(ha)) 3360 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29); 3361 3362 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3363 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS); 3364 icb->rid = cpu_to_le16(rid); 3365 if (ha->flags.msix_enabled) { 3366 msix = &ha->msix_entries[1]; 3367 ql_dbg(ql_dbg_init, vha, 0x0019, 3368 "Registering vector 0x%x for base que.\n", 3369 msix->entry); 3370 icb->msix = cpu_to_le16(msix->entry); 3371 } 3372 /* Use alternate PCI bus number */ 3373 if (MSB(rid)) 3374 icb->firmware_options_2 |= cpu_to_le32(BIT_19); 3375 /* Use alternate PCI devfn */ 3376 if (LSB(rid)) 3377 icb->firmware_options_2 |= cpu_to_le32(BIT_18); 3378 3379 /* Use 
Disable MSIX Handshake mode for capable adapters */ 3380 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && 3381 (ha->flags.msix_enabled)) { 3382 icb->firmware_options_2 &= cpu_to_le32(~BIT_22); 3383 ha->flags.disable_msix_handshake = 1; 3384 ql_dbg(ql_dbg_init, vha, 0x00fe, 3385 "MSIX Handshake Disable Mode turned on.\n"); 3386 } else { 3387 icb->firmware_options_2 |= cpu_to_le32(BIT_22); 3388 } 3389 icb->firmware_options_2 |= cpu_to_le32(BIT_23); 3390 3391 WRT_REG_DWORD(®->isp25mq.req_q_in, 0); 3392 WRT_REG_DWORD(®->isp25mq.req_q_out, 0); 3393 WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0); 3394 WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0); 3395 } else { 3396 WRT_REG_DWORD(®->isp24.req_q_in, 0); 3397 WRT_REG_DWORD(®->isp24.req_q_out, 0); 3398 WRT_REG_DWORD(®->isp24.rsp_q_in, 0); 3399 WRT_REG_DWORD(®->isp24.rsp_q_out, 0); 3400 } 3401 qlt_24xx_config_rings(vha); 3402 3403 /* PCI posting */ 3404 RD_REG_DWORD(&ioreg->hccr); 3405 } 3406 3407 /** 3408 * qla2x00_init_rings() - Initializes firmware. 3409 * @ha: HA context 3410 * 3411 * Beginning of request ring has initialization control block already built 3412 * by nvram config routine. 3413 * 3414 * Returns 0 on success. 3415 */ 3416 int 3417 qla2x00_init_rings(scsi_qla_host_t *vha) 3418 { 3419 int rval; 3420 unsigned long flags = 0; 3421 int cnt, que; 3422 struct qla_hw_data *ha = vha->hw; 3423 struct req_que *req; 3424 struct rsp_que *rsp; 3425 struct mid_init_cb_24xx *mid_init_cb = 3426 (struct mid_init_cb_24xx *) ha->init_cb; 3427 3428 spin_lock_irqsave(&ha->hardware_lock, flags); 3429 3430 /* Clear outstanding commands array. 
 */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/*
		 * Shadow out-pointer lives immediately past the ring.
		 * NOTE(review): assumes the ring allocation reserved that
		 * extra slot -- confirm against the queue allocator.
		 */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Handle 0 is never issued; start clearing from slot 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer, same layout convention as above. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/*
		 * NOTE(review): firmware_options_1 is little-endian; the
		 * untranslated BIT_7/BIT_6 tests below work because both
		 * bits live in the low-order byte -- confirm.
		 */
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	/* Poll firmware state every 500ms until READY or a deadline hits. */
	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/*
					 * Credit the 84xx init time back to
					 * both deadlines so it doesn't eat
					 * into the wait budget.
					 */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/*
			 * NOTE(review): loop_id 0x1b on the base port is
			 * treated as "link not initialized"; retry after an
			 * explicit link init instead of a full ISP abort.
			 */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* Map the firmware-reported topology code onto driver state. */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	qlt_update_host_map(vha, id);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}

/*
 * qla2x00_set_model_info - fill ha->model_number/model_desc either from the
 * NVRAM-supplied model string or from the static qla2x00_model_name table
 * (older ISPs only), falling back to @def.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	/* Lookup table is only used for pre-24xx, non-CNA parts. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces/NULs from the copied model string. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
3798 qla2x00_model_name[index * 2]); 3799 strncpy(ha->model_desc, 3800 qla2x00_model_name[index * 2 + 1], 3801 sizeof(ha->model_desc) - 1); 3802 } else { 3803 strcpy(ha->model_number, def); 3804 } 3805 } 3806 if (IS_FWI2_CAPABLE(ha)) 3807 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, 3808 sizeof(ha->model_desc)); 3809 } 3810 3811 /* On sparc systems, obtain port and node WWN from firmware 3812 * properties. 3813 */ 3814 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv) 3815 { 3816 #ifdef CONFIG_SPARC 3817 struct qla_hw_data *ha = vha->hw; 3818 struct pci_dev *pdev = ha->pdev; 3819 struct device_node *dp = pci_device_to_OF_node(pdev); 3820 const u8 *val; 3821 int len; 3822 3823 val = of_get_property(dp, "port-wwn", &len); 3824 if (val && len >= WWN_SIZE) 3825 memcpy(nv->port_name, val, WWN_SIZE); 3826 3827 val = of_get_property(dp, "node-wwn", &len); 3828 if (val && len >= WWN_SIZE) 3829 memcpy(nv->node_name, val, WWN_SIZE); 3830 #endif 3831 } 3832 3833 /* 3834 * NVRAM configuration for ISP 2xxx 3835 * 3836 * Input: 3837 * ha = adapter block pointer. 3838 * 3839 * Output: 3840 * initialization control block in response_ring 3841 * host adapters parameters in host adapter block 3842 * 3843 * Returns: 3844 * 0 = success. 3845 */ 3846 int 3847 qla2x00_nvram_config(scsi_qla_host_t *vha) 3848 { 3849 int rval; 3850 uint8_t chksum = 0; 3851 uint16_t cnt; 3852 uint8_t *dptr1, *dptr2; 3853 struct qla_hw_data *ha = vha->hw; 3854 init_cb_t *icb = ha->init_cb; 3855 nvram_t *nv = ha->nvram; 3856 uint8_t *ptr = ha->nvram; 3857 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3858 3859 rval = QLA_SUCCESS; 3860 3861 /* Determine NVRAM starting address. */ 3862 ha->nvram_size = sizeof(nvram_t); 3863 ha->nvram_base = 0; 3864 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) 3865 if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1) 3866 ha->nvram_base = 0x80; 3867 3868 /* Get NVRAM data and calculate checksum. 
 */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	/* Byte-sum checksum: a valid image sums to zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM "
		    "detected: checksum=0x%x id=%c version=0x%x.\n",
		    chksum, nv->id[0], nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Synthesized (invalid but functional) WWPN prefix. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval flags that defaults were substituted; the
		 * warning at the end of this function fires in that case. */
		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = 2048;
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
4134 icb->interrupt_delay_timer: 2; 4135 } 4136 icb->add_firmware_options[0] &= 4137 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 4138 vha->flags.process_response_queue = 0; 4139 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4140 ha->zio_mode = QLA_ZIO_MODE_6; 4141 4142 ql_log(ql_log_info, vha, 0x0068, 4143 "ZIO mode %d enabled; timer delay (%d us).\n", 4144 ha->zio_mode, ha->zio_timer * 100); 4145 4146 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 4147 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 4148 vha->flags.process_response_queue = 1; 4149 } 4150 } 4151 4152 if (rval) { 4153 ql_log(ql_log_warn, vha, 0x0069, 4154 "NVRAM configuration failed.\n"); 4155 } 4156 return (rval); 4157 } 4158 4159 static void 4160 qla2x00_rport_del(void *data) 4161 { 4162 fc_port_t *fcport = data; 4163 struct fc_rport *rport; 4164 unsigned long flags; 4165 4166 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 4167 rport = fcport->drport ? fcport->drport: fcport->rport; 4168 fcport->drport = NULL; 4169 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 4170 if (rport) { 4171 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, 4172 "%s %8phN. rport %p roles %x\n", 4173 __func__, fcport->port_name, rport, 4174 rport->roles); 4175 4176 fc_remote_port_delete(rport); 4177 } 4178 } 4179 4180 /** 4181 * qla2x00_alloc_fcport() - Allocate a generic fcport. 4182 * @ha: HA context 4183 * @flags: allocation flags 4184 * 4185 * Returns a pointer to the allocated fcport, or NULL, if none available. 4186 */ 4187 fc_port_t * 4188 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 4189 { 4190 fc_port_t *fcport; 4191 4192 fcport = kzalloc(sizeof(fc_port_t), flags); 4193 if (!fcport) 4194 return NULL; 4195 4196 /* Setup fcport template structure. 
*/ 4197 fcport->vha = vha; 4198 fcport->port_type = FCT_UNKNOWN; 4199 fcport->loop_id = FC_NO_LOOP_ID; 4200 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 4201 fcport->supported_classes = FC_COS_UNSPECIFIED; 4202 4203 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, 4204 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, 4205 flags); 4206 fcport->disc_state = DSC_DELETED; 4207 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 4208 fcport->deleted = QLA_SESS_DELETED; 4209 fcport->login_retry = vha->hw->login_retry_count; 4210 fcport->login_retry = 5; 4211 fcport->logout_on_delete = 1; 4212 4213 if (!fcport->ct_desc.ct_sns) { 4214 ql_log(ql_log_warn, vha, 0xd049, 4215 "Failed to allocate ct_sns request.\n"); 4216 kfree(fcport); 4217 fcport = NULL; 4218 } 4219 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 4220 INIT_LIST_HEAD(&fcport->gnl_entry); 4221 INIT_LIST_HEAD(&fcport->list); 4222 4223 return fcport; 4224 } 4225 4226 void 4227 qla2x00_free_fcport(fc_port_t *fcport) 4228 { 4229 if (fcport->ct_desc.ct_sns) { 4230 dma_free_coherent(&fcport->vha->hw->pdev->dev, 4231 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, 4232 fcport->ct_desc.ct_sns_dma); 4233 4234 fcport->ct_desc.ct_sns = NULL; 4235 } 4236 kfree(fcport); 4237 } 4238 4239 /* 4240 * qla2x00_configure_loop 4241 * Updates Fibre Channel Device Database with what is actually on loop. 4242 * 4243 * Input: 4244 * ha = adapter block pointer. 4245 * 4246 * Returns: 4247 * 0 = success. 4248 * 1 = error. 4249 * 2 = database was full and device was not configured. 
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Work on a snapshot of dpc_flags; save_flags is used at the end
	 * to re-arm updates if a resync event raced with us. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown/offline: scan both the local loop and the fabric. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
					spin_lock_irqsave(&ha->tgt.atio_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 0);
					spin_unlock_irqrestore(
					    &ha->tgt.atio_lock, flags);
				} else {
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 1);
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
				}
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/*
 * N2N Login
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 */
static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
	fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	int res = QLA_SUCCESS, rval;
	int greater_wwpn = 0;
	int logged_in = 0;

	/* Only applicable in point-to-point (N_Port) topology. */
	if (ha->current_topology != ISP_CFG_N)
		return res;

	/* The side with the larger WWPN initiates the PLOGI. */
	if (wwn_to_u64(vha->port_name) >
	    wwn_to_u64(vha->n2n_port_name)) {
		ql_dbg(ql_dbg_disc, vha, 0x2002,
		    "HBA WWPN is greater %llx > target %llx\n",
		    wwn_to_u64(vha->port_name),
		    wwn_to_u64(vha->n2n_port_name));
		greater_wwpn = 1;
		fcport->d_id.b24 = vha->n2n_id;
	}

	fcport->loop_id = vha->loop_id;
	fcport->fc4f_nvme = 0;
	fcport->query = 1;

	ql_dbg(ql_dbg_disc, vha, 0x4001,
	    "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
	    fcport->d_id.b24, vha->loop_id);

	/* Fill in member data. */
	if (!greater_wwpn) {
		rval = qla2x00_get_port_database(vha, fcport, 0);
		ql_dbg(ql_dbg_disc, vha, 0x1051,
		    "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
		    fcport->current_login_state, fcport->last_login_state,
		    fcport->d_id.b24, fcport->loop_id, rval);

		/*
		 * NOTE(review): low nibble 0x4/0x6 are firmware login
		 * states meaning PLOGI/PRLI complete -- confirm against the
		 * firmware interface definitions.
		 */
		if (((fcport->current_login_state & 0xf) == 0x4) ||
		    ((fcport->current_login_state & 0xf) == 0x6))
			logged_in = 1;
	}

	if (logged_in || greater_wwpn) {
		if (!vha->nvme_local_port && vha->flags.nvme_enabled)
			qla_nvme_register_hba(vha);

		/* Set connected N_Port d_id */
		if (vha->flags.nvme_enabled)
			fcport->fc4f_nvme = 1;

		fcport->scan_state = QLA_FCPORT_FOUND;
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		fcport->disc_state = DSC_GNL;
		fcport->n2n_flag = 1;
		fcport->flags = 3;
		vha->hw->flags.gpsc_supported = 0;

		if (greater_wwpn) {
			ql_dbg(ql_dbg_disc, vha, 0x20e5,
			    "%s %d PLOGI ELS %8phC\n",
			    __func__, __LINE__, fcport->port_name);

			res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    fcport, fcport->d_id);
		}

		if (res != QLA_SUCCESS) {
			ql_log(ql_log_info, vha, 0xd04d,
			    "PLOGI Failed: portid=%06x - retrying\n",
			    fcport->d_id.b24);
			res = QLA_SUCCESS;
		} else {
			/* State 0x6 means FCP PRLI complete */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post NVMe PRLI\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			}
		}
	} else {
		/* Wait for next database change */
		set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
	}

	return res;
}

/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int		rval, rval2;
	int		found_devs;
	int		found;
	fc_port_t	*fcport, *new_fcport;

	uint16_t	index;
	uint16_t	entries;
	char		*id_iter;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices.
*/ 4512 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); 4513 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 4514 &entries); 4515 if (rval != QLA_SUCCESS) 4516 goto cleanup_allocation; 4517 4518 ql_dbg(ql_dbg_disc, vha, 0x2011, 4519 "Entries in ID list (%d).\n", entries); 4520 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, 4521 (uint8_t *)ha->gid_list, 4522 entries * sizeof(struct gid_list_info)); 4523 4524 /* Allocate temporary fcport for any new fcports discovered. */ 4525 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 4526 if (new_fcport == NULL) { 4527 ql_log(ql_log_warn, vha, 0x2012, 4528 "Memory allocation failed for fcport.\n"); 4529 rval = QLA_MEMORY_ALLOC_FAILED; 4530 goto cleanup_allocation; 4531 } 4532 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 4533 4534 /* 4535 * Mark local devices that were present with FCF_DEVICE_LOST for now. 4536 */ 4537 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4538 if (atomic_read(&fcport->state) == FCS_ONLINE && 4539 fcport->port_type != FCT_BROADCAST && 4540 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 4541 4542 ql_dbg(ql_dbg_disc, vha, 0x2096, 4543 "Marking port lost loop_id=0x%04x.\n", 4544 fcport->loop_id); 4545 4546 qla2x00_mark_device_lost(vha, fcport, 0, 0); 4547 } 4548 } 4549 4550 /* Inititae N2N login. */ 4551 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { 4552 rval = qla24xx_n2n_handle_login(vha, new_fcport); 4553 if (rval != QLA_SUCCESS) 4554 goto cleanup_allocation; 4555 return QLA_SUCCESS; 4556 } 4557 4558 /* Add devices to port list. 
*/ 4559 id_iter = (char *)ha->gid_list; 4560 for (index = 0; index < entries; index++) { 4561 domain = ((struct gid_list_info *)id_iter)->domain; 4562 area = ((struct gid_list_info *)id_iter)->area; 4563 al_pa = ((struct gid_list_info *)id_iter)->al_pa; 4564 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 4565 loop_id = (uint16_t) 4566 ((struct gid_list_info *)id_iter)->loop_id_2100; 4567 else 4568 loop_id = le16_to_cpu( 4569 ((struct gid_list_info *)id_iter)->loop_id); 4570 id_iter += ha->gid_list_info_size; 4571 4572 /* Bypass reserved domain fields. */ 4573 if ((domain & 0xf0) == 0xf0) 4574 continue; 4575 4576 /* Bypass if not same domain and area of adapter. */ 4577 if (area && domain && 4578 (area != vha->d_id.b.area || domain != vha->d_id.b.domain)) 4579 continue; 4580 4581 /* Bypass invalid local loop ID. */ 4582 if (loop_id > LAST_LOCAL_LOOP_ID) 4583 continue; 4584 4585 memset(new_fcport->port_name, 0, WWN_SIZE); 4586 4587 /* Fill in member data. */ 4588 new_fcport->d_id.b.domain = domain; 4589 new_fcport->d_id.b.area = area; 4590 new_fcport->d_id.b.al_pa = al_pa; 4591 new_fcport->loop_id = loop_id; 4592 4593 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 4594 if (rval2 != QLA_SUCCESS) { 4595 ql_dbg(ql_dbg_disc, vha, 0x2097, 4596 "Failed to retrieve fcport information " 4597 "-- get_port_database=%x, loop_id=0x%04x.\n", 4598 rval2, new_fcport->loop_id); 4599 /* Skip retry if N2N */ 4600 if (ha->current_topology != ISP_CFG_N) { 4601 ql_dbg(ql_dbg_disc, vha, 0x2105, 4602 "Scheduling resync.\n"); 4603 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4604 continue; 4605 } 4606 } 4607 4608 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4609 /* Check for matching device in port list. 
*/ 4610 found = 0; 4611 fcport = NULL; 4612 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4613 if (memcmp(new_fcport->port_name, fcport->port_name, 4614 WWN_SIZE)) 4615 continue; 4616 4617 fcport->flags &= ~FCF_FABRIC_DEVICE; 4618 fcport->loop_id = new_fcport->loop_id; 4619 fcport->port_type = new_fcport->port_type; 4620 fcport->d_id.b24 = new_fcport->d_id.b24; 4621 memcpy(fcport->node_name, new_fcport->node_name, 4622 WWN_SIZE); 4623 4624 if (!fcport->login_succ) { 4625 vha->fcport_count++; 4626 fcport->login_succ = 1; 4627 fcport->disc_state = DSC_LOGIN_COMPLETE; 4628 } 4629 4630 found++; 4631 break; 4632 } 4633 4634 if (!found) { 4635 /* New device, add to fcports list. */ 4636 list_add_tail(&new_fcport->list, &vha->vp_fcports); 4637 4638 /* Allocate a new replacement fcport. */ 4639 fcport = new_fcport; 4640 if (!fcport->login_succ) { 4641 vha->fcport_count++; 4642 fcport->login_succ = 1; 4643 fcport->disc_state = DSC_LOGIN_COMPLETE; 4644 } 4645 4646 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4647 4648 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 4649 4650 if (new_fcport == NULL) { 4651 ql_log(ql_log_warn, vha, 0xd031, 4652 "Failed to allocate memory for fcport.\n"); 4653 rval = QLA_MEMORY_ALLOC_FAILED; 4654 goto cleanup_allocation; 4655 } 4656 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4657 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 4658 } 4659 4660 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4661 4662 /* Base iIDMA settings on HBA port speed. 
*/ 4663 fcport->fp_speed = ha->link_data_rate; 4664 4665 qla2x00_update_fcport(vha, fcport); 4666 4667 found_devs++; 4668 } 4669 4670 cleanup_allocation: 4671 kfree(new_fcport); 4672 4673 if (rval != QLA_SUCCESS) { 4674 ql_dbg(ql_dbg_disc, vha, 0x2098, 4675 "Configure local loop error exit: rval=%x.\n", rval); 4676 } 4677 4678 return (rval); 4679 } 4680 4681 static void 4682 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 4683 { 4684 int rval; 4685 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4686 struct qla_hw_data *ha = vha->hw; 4687 4688 if (!IS_IIDMA_CAPABLE(ha)) 4689 return; 4690 4691 if (atomic_read(&fcport->state) != FCS_ONLINE) 4692 return; 4693 4694 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 4695 fcport->fp_speed > ha->link_data_rate) 4696 return; 4697 4698 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 4699 mb); 4700 if (rval != QLA_SUCCESS) { 4701 ql_dbg(ql_dbg_disc, vha, 0x2004, 4702 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", 4703 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); 4704 } else { 4705 ql_dbg(ql_dbg_disc, vha, 0x2005, 4706 "iIDMA adjusted to %s GB/s on %8phN.\n", 4707 qla2x00_get_link_speed_str(ha, fcport->fp_speed), 4708 fcport->port_name); 4709 } 4710 } 4711 4712 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ 4713 static void 4714 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) 4715 { 4716 struct fc_rport_identifiers rport_ids; 4717 struct fc_rport *rport; 4718 unsigned long flags; 4719 4720 rport_ids.node_name = wwn_to_u64(fcport->node_name); 4721 rport_ids.port_name = wwn_to_u64(fcport->port_name); 4722 rport_ids.port_id = fcport->d_id.b.domain << 16 | 4723 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 4724 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 4725 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 4726 if (!rport) { 4727 ql_log(ql_log_warn, vha, 0x2006, 4728 "Unable to allocate fc remote port.\n"); 4729 return; 4730 } 4731 
4732 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 4733 *((fc_port_t **)rport->dd_data) = fcport; 4734 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 4735 4736 rport->supported_classes = fcport->supported_classes; 4737 4738 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 4739 if (fcport->port_type == FCT_INITIATOR) 4740 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 4741 if (fcport->port_type == FCT_TARGET) 4742 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 4743 4744 ql_dbg(ql_dbg_disc, vha, 0x20ee, 4745 "%s %8phN. rport %p is %s mode\n", 4746 __func__, fcport->port_name, rport, 4747 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini"); 4748 4749 fc_remote_port_rolechg(rport, rport_ids.roles); 4750 } 4751 4752 /* 4753 * qla2x00_update_fcport 4754 * Updates device on list. 4755 * 4756 * Input: 4757 * ha = adapter block pointer. 4758 * fcport = port structure pointer. 4759 * 4760 * Return: 4761 * 0 - Success 4762 * BIT_0 - error 4763 * 4764 * Context: 4765 * Kernel context. 
4766 */ 4767 void 4768 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 4769 { 4770 fcport->vha = vha; 4771 4772 if (IS_SW_RESV_ADDR(fcport->d_id)) 4773 return; 4774 4775 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", 4776 __func__, fcport->port_name); 4777 4778 if (IS_QLAFX00(vha->hw)) { 4779 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 4780 goto reg_port; 4781 } 4782 fcport->login_retry = 0; 4783 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 4784 fcport->disc_state = DSC_LOGIN_COMPLETE; 4785 fcport->deleted = 0; 4786 fcport->logout_on_delete = 1; 4787 4788 if (fcport->fc4f_nvme) { 4789 qla_nvme_register_remote(vha, fcport); 4790 return; 4791 } 4792 4793 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 4794 qla2x00_iidma_fcport(vha, fcport); 4795 qla24xx_update_fcport_fcp_prio(vha, fcport); 4796 4797 reg_port: 4798 switch (vha->host->active_mode) { 4799 case MODE_INITIATOR: 4800 qla2x00_reg_remote_port(vha, fcport); 4801 break; 4802 case MODE_TARGET: 4803 if (!vha->vha_tgt.qla_tgt->tgt_stop && 4804 !vha->vha_tgt.qla_tgt->tgt_stopped) 4805 qlt_fc_port_added(vha, fcport); 4806 break; 4807 case MODE_DUAL: 4808 qla2x00_reg_remote_port(vha, fcport); 4809 if (!vha->vha_tgt.qla_tgt->tgt_stop && 4810 !vha->vha_tgt.qla_tgt->tgt_stopped) 4811 qlt_fc_port_added(vha, fcport); 4812 break; 4813 default: 4814 break; 4815 } 4816 } 4817 4818 /* 4819 * qla2x00_configure_fabric 4820 * Setup SNS devices with loop ID's. 4821 * 4822 * Input: 4823 * ha = adapter block pointer. 4824 * 4825 * Returns: 4826 * 0 = success. 
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch: nothing to configure, not an error. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;


	/* In target/dual mode, ask the firmware to forward RSCNs to us. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}


	/* do { } while (0): single pass with break-on-error cleanup. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}
		/* Register our FC-4 types/features and names with the
		 * fabric name server. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}

		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->scan_state = QLA_FCPORT_SCAN;
		}

		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		rval = qla2x00_find_all_fabric_devs(vha);
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *	Walk the fabric name-server database (GID_PT plus follow-up
 *	queries, falling back to GA_NXT) and reconcile the result with
 *	our fcport list: new ports are added, moved ports updated, and
 *	absent ports marked lost / scheduled for session deletion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		/* Any failed name-server query drops us back to the
		 * per-port GA_NXT path (swl = NULL). */
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				/* Force the wrap-detection below to end
				 * the scan. */
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				new_fcport->fc4f_nvme = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
		    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			/* Still in SCAN state: the name server did not
			 * report this port this pass — it has gone away. */
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion_lock
						(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}

/*
 * qla2x00_find_new_loop_id
 *	Scan through our port list and find a new usable loop ID.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dev:	port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* loop_id_map is shared across vports; protect with vport_slock. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
	    LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else
		set_bit(dev->loop_id, ha->loop_id_map);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return (rval);
}


/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry loop: each mailbox status either terminates or adjusts
	 * the loop ID and tries again. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bit 0: remote is an initiator;
			 * bit 1: FCP-2 (tape-style) recovery capable. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10]: supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (raw status values; no named constants exist for these):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	req = vha->req;
	rsp = req->rsp;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, req, rsp, 0, 0,
						MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Loop again while a resync is re-requested
				 * and no abort/loop-down intervened. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */

int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync at a time (LOOP_RESYNC_ACTIVE as a gate). */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/*
 * qla2x00_update_fcports
 *	Walk every vport on the HBA and delete rports whose removal was
 *	deferred (fcport->drport set).  Drops vport_slock around the
 *	actual deletion since qla2x00_rport_del() may sleep.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport while the lock is dropped. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
/*
 * Determine whether this function is the IDC reset owner: true iff no
 * non-FCoE protocol driver is present and this port has the lowest
 * function number among the FCoE functions.
 */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/* Each partition-info register packs eight 4-bit class fields;
	 * functions 0-7 in info1, 8-15 in info2. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}

/* Set this port's bit in the shared IDC driver-ack register. */
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Clear this port's bit in the shared IDC driver-ack register. */
static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Map an IDC device-state value to a human-readable name for logging. */
static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA8XXX_DEV_COLD:
		return "COLD/RE-INIT";
	case QLA8XXX_DEV_INITIALIZING:
		return "INITIALIZING";
	case QLA8XXX_DEV_READY:
		return "READY";
	case QLA8XXX_DEV_NEED_RESET:
		return "NEED RESET";
	case QLA8XXX_DEV_NEED_QUIESCENT:
		return "NEED QUIESCENT";
	case QLA8XXX_DEV_FAILED:
		return "FAILED";
	case QLA8XXX_DEV_QUIESCENT:
		return "QUIESCENT";
	default:
		return "Unknown";
	}
}

/* Assumes
idc-lock always held on entry */ 5718 void 5719 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) 5720 { 5721 struct qla_hw_data *ha = vha->hw; 5722 uint32_t idc_audit_reg = 0, duration_secs = 0; 5723 5724 switch (audit_type) { 5725 case IDC_AUDIT_TIMESTAMP: 5726 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); 5727 idc_audit_reg = (ha->portnum) | 5728 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); 5729 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 5730 break; 5731 5732 case IDC_AUDIT_COMPLETION: 5733 duration_secs = ((jiffies_to_msecs(jiffies) - 5734 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); 5735 idc_audit_reg = (ha->portnum) | 5736 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); 5737 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); 5738 break; 5739 5740 default: 5741 ql_log(ql_log_warn, vha, 0xb078, 5742 "Invalid audit type specified.\n"); 5743 break; 5744 } 5745 } 5746 5747 /* Assumes idc_lock always held on entry */ 5748 static int 5749 qla83xx_initiating_reset(scsi_qla_host_t *vha) 5750 { 5751 struct qla_hw_data *ha = vha->hw; 5752 uint32_t idc_control, dev_state; 5753 5754 __qla83xx_get_idc_control(vha, &idc_control); 5755 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { 5756 ql_log(ql_log_info, vha, 0xb080, 5757 "NIC Core reset has been disabled. idc-control=0x%x\n", 5758 idc_control); 5759 return QLA_FUNCTION_FAILED; 5760 } 5761 5762 /* Set NEED-RESET iff in READY state and we are the reset-owner */ 5763 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5764 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { 5765 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, 5766 QLA8XXX_DEV_NEED_RESET); 5767 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); 5768 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); 5769 } else { 5770 const char *state = qla83xx_dev_state_to_string(dev_state); 5771 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state); 5772 5773 /* SV: XXX: Is timeout required here? 
*/ 5774 /* Wait for IDC state change READY -> NEED_RESET */ 5775 while (dev_state == QLA8XXX_DEV_READY) { 5776 qla83xx_idc_unlock(vha, 0); 5777 msleep(200); 5778 qla83xx_idc_lock(vha, 0); 5779 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5780 } 5781 } 5782 5783 /* Send IDC ack by writing to drv-ack register */ 5784 __qla83xx_set_drv_ack(vha); 5785 5786 return QLA_SUCCESS; 5787 } 5788 5789 int 5790 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) 5791 { 5792 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 5793 } 5794 5795 int 5796 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) 5797 { 5798 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); 5799 } 5800 5801 static int 5802 qla83xx_check_driver_presence(scsi_qla_host_t *vha) 5803 { 5804 uint32_t drv_presence = 0; 5805 struct qla_hw_data *ha = vha->hw; 5806 5807 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5808 if (drv_presence & (1 << ha->portnum)) 5809 return QLA_SUCCESS; 5810 else 5811 return QLA_TEST_FAILED; 5812 } 5813 5814 int 5815 qla83xx_nic_core_reset(scsi_qla_host_t *vha) 5816 { 5817 int rval = QLA_SUCCESS; 5818 struct qla_hw_data *ha = vha->hw; 5819 5820 ql_dbg(ql_dbg_p3p, vha, 0xb058, 5821 "Entered %s().\n", __func__); 5822 5823 if (vha->device_flags & DFLG_DEV_FAILED) { 5824 ql_log(ql_log_warn, vha, 0xb059, 5825 "Device in unrecoverable FAILED state.\n"); 5826 return QLA_FUNCTION_FAILED; 5827 } 5828 5829 qla83xx_idc_lock(vha, 0); 5830 5831 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { 5832 ql_log(ql_log_warn, vha, 0xb05a, 5833 "Function=0x%x has been removed from IDC participation.\n", 5834 ha->portnum); 5835 rval = QLA_FUNCTION_FAILED; 5836 goto exit; 5837 } 5838 5839 qla83xx_reset_ownership(vha); 5840 5841 rval = qla83xx_initiating_reset(vha); 5842 5843 /* 5844 * Perform reset if we are the reset-owner, 5845 * else wait till IDC state changes to READY/FAILED. 
5846 */ 5847 if (rval == QLA_SUCCESS) { 5848 rval = qla83xx_idc_state_handler(vha); 5849 5850 if (rval == QLA_SUCCESS) 5851 ha->flags.nic_core_hung = 0; 5852 __qla83xx_clear_drv_ack(vha); 5853 } 5854 5855 exit: 5856 qla83xx_idc_unlock(vha, 0); 5857 5858 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); 5859 5860 return rval; 5861 } 5862 5863 int 5864 qla2xxx_mctp_dump(scsi_qla_host_t *vha) 5865 { 5866 struct qla_hw_data *ha = vha->hw; 5867 int rval = QLA_FUNCTION_FAILED; 5868 5869 if (!IS_MCTP_CAPABLE(ha)) { 5870 /* This message can be removed from the final version */ 5871 ql_log(ql_log_info, vha, 0x506d, 5872 "This board is not MCTP capable\n"); 5873 return rval; 5874 } 5875 5876 if (!ha->mctp_dump) { 5877 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, 5878 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); 5879 5880 if (!ha->mctp_dump) { 5881 ql_log(ql_log_warn, vha, 0x506e, 5882 "Failed to allocate memory for mctp dump\n"); 5883 return rval; 5884 } 5885 } 5886 5887 #define MCTP_DUMP_STR_ADDR 0x00000000 5888 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, 5889 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); 5890 if (rval != QLA_SUCCESS) { 5891 ql_log(ql_log_warn, vha, 0x506f, 5892 "Failed to capture mctp dump\n"); 5893 } else { 5894 ql_log(ql_log_info, vha, 0x5070, 5895 "Mctp dump capture for host (%ld/%p).\n", 5896 vha->host_no, ha->mctp_dump); 5897 ha->mctp_dumped = 1; 5898 } 5899 5900 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { 5901 ha->flags.nic_core_reset_hdlr_active = 1; 5902 rval = qla83xx_restart_nic_firmware(vha); 5903 if (rval) 5904 /* NIC Core reset failed. 
*/ 5905 ql_log(ql_log_warn, vha, 0x5071, 5906 "Failed to restart nic firmware\n"); 5907 else 5908 ql_dbg(ql_dbg_p3p, vha, 0xb084, 5909 "Restarted NIC firmware successfully.\n"); 5910 ha->flags.nic_core_reset_hdlr_active = 0; 5911 } 5912 5913 return rval; 5914 5915 } 5916 5917 /* 5918 * qla2x00_quiesce_io 5919 * Description: This function will block the new I/Os 5920 * Its not aborting any I/Os as context 5921 * is not destroyed during quiescence 5922 * Arguments: scsi_qla_host_t 5923 * return : void 5924 */ 5925 void 5926 qla2x00_quiesce_io(scsi_qla_host_t *vha) 5927 { 5928 struct qla_hw_data *ha = vha->hw; 5929 struct scsi_qla_host *vp; 5930 5931 ql_dbg(ql_dbg_dpc, vha, 0x401d, 5932 "Quiescing I/O - ha=%p.\n", ha); 5933 5934 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 5935 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 5936 atomic_set(&vha->loop_state, LOOP_DOWN); 5937 qla2x00_mark_all_devices_lost(vha, 0); 5938 list_for_each_entry(vp, &ha->vp_list, list) 5939 qla2x00_mark_all_devices_lost(vp, 0); 5940 } else { 5941 if (!atomic_read(&vha->loop_down_timer)) 5942 atomic_set(&vha->loop_down_timer, 5943 LOOP_DOWN_TIME); 5944 } 5945 /* Wait for pending cmds to complete */ 5946 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST); 5947 } 5948 5949 void 5950 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) 5951 { 5952 struct qla_hw_data *ha = vha->hw; 5953 struct scsi_qla_host *vp; 5954 unsigned long flags; 5955 fc_port_t *fcport; 5956 u16 i; 5957 5958 /* For ISP82XX, driver waits for completion of the commands. 5959 * online flag should be set. 5960 */ 5961 if (!(IS_P3P_TYPE(ha))) 5962 vha->flags.online = 0; 5963 ha->flags.chip_reset_done = 0; 5964 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5965 vha->qla_stats.total_isp_aborts++; 5966 5967 ql_log(ql_log_info, vha, 0x00af, 5968 "Performing ISP error recovery - ha=%p.\n", ha); 5969 5970 /* For ISP82XX, reset_chip is just disabling interrupts. 5971 * Driver waits for the completion of the commands. 
5972 * the interrupts need to be enabled. 5973 */ 5974 if (!(IS_P3P_TYPE(ha))) 5975 ha->isp_ops->reset_chip(vha); 5976 5977 ha->flags.n2n_ae = 0; 5978 ha->flags.lip_ae = 0; 5979 ha->current_topology = 0; 5980 ha->flags.fw_started = 0; 5981 ha->flags.fw_init_done = 0; 5982 ha->base_qpair->chip_reset++; 5983 for (i = 0; i < ha->max_qpairs; i++) { 5984 if (ha->queue_pair_map[i]) 5985 ha->queue_pair_map[i]->chip_reset = 5986 ha->base_qpair->chip_reset; 5987 } 5988 5989 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 5990 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 5991 atomic_set(&vha->loop_state, LOOP_DOWN); 5992 qla2x00_mark_all_devices_lost(vha, 0); 5993 5994 spin_lock_irqsave(&ha->vport_slock, flags); 5995 list_for_each_entry(vp, &ha->vp_list, list) { 5996 atomic_inc(&vp->vref_count); 5997 spin_unlock_irqrestore(&ha->vport_slock, flags); 5998 5999 qla2x00_mark_all_devices_lost(vp, 0); 6000 6001 spin_lock_irqsave(&ha->vport_slock, flags); 6002 atomic_dec(&vp->vref_count); 6003 } 6004 spin_unlock_irqrestore(&ha->vport_slock, flags); 6005 } else { 6006 if (!atomic_read(&vha->loop_down_timer)) 6007 atomic_set(&vha->loop_down_timer, 6008 LOOP_DOWN_TIME); 6009 } 6010 6011 /* Clear all async request states across all VPs. 
*/ 6012 list_for_each_entry(fcport, &vha->vp_fcports, list) 6013 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6014 spin_lock_irqsave(&ha->vport_slock, flags); 6015 list_for_each_entry(vp, &ha->vp_list, list) { 6016 atomic_inc(&vp->vref_count); 6017 spin_unlock_irqrestore(&ha->vport_slock, flags); 6018 6019 list_for_each_entry(fcport, &vp->vp_fcports, list) 6020 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6021 6022 spin_lock_irqsave(&ha->vport_slock, flags); 6023 atomic_dec(&vp->vref_count); 6024 } 6025 spin_unlock_irqrestore(&ha->vport_slock, flags); 6026 6027 if (!ha->flags.eeh_busy) { 6028 /* Make sure for ISP 82XX IO DMA is complete */ 6029 if (IS_P3P_TYPE(ha)) { 6030 qla82xx_chip_reset_cleanup(vha); 6031 ql_log(ql_log_info, vha, 0x00b4, 6032 "Done chip reset cleanup.\n"); 6033 6034 /* Done waiting for pending commands. 6035 * Reset the online flag. 6036 */ 6037 vha->flags.online = 0; 6038 } 6039 6040 /* Requeue all commands in outstanding command list. */ 6041 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 6042 } 6043 /* memory barrier */ 6044 wmb(); 6045 } 6046 6047 /* 6048 * qla2x00_abort_isp 6049 * Resets ISP and aborts all outstanding commands. 6050 * 6051 * Input: 6052 * ha = adapter block pointer. 
*
* Returns:
*      0 = success
*/
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			/* Drop out of FCoE IDC participation for the reset. */
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently gone: nothing more can be done. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm FCE tracing if it was active before reset. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm extended firmware tracing likewise. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					ha->isp_ops->reset_adapter(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: seed the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every vport (vref pins each vp
		 * while vport_slock is dropped around the call). */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			/* Re-join the FCoE IDC protocol after recovery. */
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}

/*
*  qla2x00_restart_isp
*      restarts the ISP after a reset
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (!status)
			status = qla2x00_setup_chip(vha);
	}

	if (!status && !(status = qla2x00_init_rings(vha))) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		/* Initialize the queues in use */
		qla25xx_init_queues(ha);

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}
	return (status);
}

/*
 * qla25xx_init_queues
 *	Re-initialize every mapped request/response queue beyond queue 0
 *	after a chip reset.  Returns the status of the last queue
 *	initialized (-1 if no additional queues were mapped).
 */
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
	struct rsp_que *rsp = NULL;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int ret = -1;
	int i;

	for (i = 1; i < ha->max_rsp_queues; i++) {
		rsp = ha->rsp_q_map[i];
		if (rsp && test_bit(i, ha->rsp_qid_map)) {
			rsp->options &= ~BIT_0;
			ret = qla25xx_init_rsp_que(base_vha, rsp);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
				    "%s Rsp que: %d init failed.\n",
				    __func__, rsp->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0100,
				    "%s Rsp que: %d inited.\n",
				    __func__, rsp->id);
		}
	}
	for (i = 1; i < ha->max_req_queues; i++) {
		req = ha->req_q_map[i];
		if (req && test_bit(i, ha->req_qid_map)) {
			/* Clear outstanding commands array. */
			req->options &= ~BIT_0;
			ret = qla25xx_init_req_que(base_vha, req);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x0101,
				    "%s Req que: %d init failed.\n",
				    __func__, req->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0102,
				    "%s Req que: %d inited.\n",
				    __func__, req->id);
		}
	}
	return ret;
}

/*
* qla2x00_reset_adapter
*      Reset adapter.
*
* Input:
*      ha = adapter block pointer.
*/
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * qla24xx_reset_adapter
 *	ISP24xx variant of adapter reset: assert RISC reset then release
 *	it to pause, with read-backs to flush posted PCI writes.
 */
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
6342 */ 6343 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 6344 struct nvram_24xx *nv) 6345 { 6346 #ifdef CONFIG_SPARC 6347 struct qla_hw_data *ha = vha->hw; 6348 struct pci_dev *pdev = ha->pdev; 6349 struct device_node *dp = pci_device_to_OF_node(pdev); 6350 const u8 *val; 6351 int len; 6352 6353 val = of_get_property(dp, "port-wwn", &len); 6354 if (val && len >= WWN_SIZE) 6355 memcpy(nv->port_name, val, WWN_SIZE); 6356 6357 val = of_get_property(dp, "node-wwn", &len); 6358 if (val && len >= WWN_SIZE) 6359 memcpy(nv->node_name, val, WWN_SIZE); 6360 #endif 6361 } 6362 6363 int 6364 qla24xx_nvram_config(scsi_qla_host_t *vha) 6365 { 6366 int rval; 6367 struct init_cb_24xx *icb; 6368 struct nvram_24xx *nv; 6369 uint32_t *dptr; 6370 uint8_t *dptr1, *dptr2; 6371 uint32_t chksum; 6372 uint16_t cnt; 6373 struct qla_hw_data *ha = vha->hw; 6374 6375 rval = QLA_SUCCESS; 6376 icb = (struct init_cb_24xx *)ha->init_cb; 6377 nv = ha->nvram; 6378 6379 /* Determine NVRAM starting address. */ 6380 if (ha->port_no == 0) { 6381 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 6382 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 6383 } else { 6384 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 6385 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 6386 } 6387 6388 ha->nvram_size = sizeof(struct nvram_24xx); 6389 ha->vpd_size = FA_NVRAM_VPD_SIZE; 6390 6391 /* Get VPD data into cache */ 6392 ha->vpd = ha->nvram + VPD_OFFSET; 6393 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 6394 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 6395 6396 /* Get NVRAM data into cache and calculate checksum. 
*/ 6397 dptr = (uint32_t *)nv; 6398 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 6399 ha->nvram_size); 6400 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) 6401 chksum += le32_to_cpu(*dptr); 6402 6403 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, 6404 "Contents of NVRAM\n"); 6405 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, 6406 (uint8_t *)nv, ha->nvram_size); 6407 6408 /* Bad NVRAM data, set defaults parameters. */ 6409 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 6410 || nv->id[3] != ' ' || 6411 nv->nvram_version < cpu_to_le16(ICB_VERSION)) { 6412 /* Reset NVRAM data. */ 6413 ql_log(ql_log_warn, vha, 0x006b, 6414 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 6415 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 6416 ql_log(ql_log_warn, vha, 0x006c, 6417 "Falling back to functioning (yet invalid -- WWPN) " 6418 "defaults.\n"); 6419 6420 /* 6421 * Set default initialization control block. 6422 */ 6423 memset(nv, 0, ha->nvram_size); 6424 nv->nvram_version = cpu_to_le16(ICB_VERSION); 6425 nv->version = cpu_to_le16(ICB_VERSION); 6426 nv->frame_payload_size = 2048; 6427 nv->execution_throttle = cpu_to_le16(0xFFFF); 6428 nv->exchange_count = cpu_to_le16(0); 6429 nv->hard_address = cpu_to_le16(124); 6430 nv->port_name[0] = 0x21; 6431 nv->port_name[1] = 0x00 + ha->port_no + 1; 6432 nv->port_name[2] = 0x00; 6433 nv->port_name[3] = 0xe0; 6434 nv->port_name[4] = 0x8b; 6435 nv->port_name[5] = 0x1c; 6436 nv->port_name[6] = 0x55; 6437 nv->port_name[7] = 0x86; 6438 nv->node_name[0] = 0x20; 6439 nv->node_name[1] = 0x00; 6440 nv->node_name[2] = 0x00; 6441 nv->node_name[3] = 0xe0; 6442 nv->node_name[4] = 0x8b; 6443 nv->node_name[5] = 0x1c; 6444 nv->node_name[6] = 0x55; 6445 nv->node_name[7] = 0x86; 6446 qla24xx_nvram_wwn_from_ofw(vha, nv); 6447 nv->login_retry_count = cpu_to_le16(8); 6448 nv->interrupt_delay_timer = cpu_to_le16(0); 6449 nv->login_timeout = cpu_to_le16(0); 6450 
nv->firmware_options_1 = 6451 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 6452 nv->firmware_options_2 = cpu_to_le32(2 << 4); 6453 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6454 nv->firmware_options_3 = cpu_to_le32(2 << 13); 6455 nv->host_p = cpu_to_le32(BIT_11|BIT_10); 6456 nv->efi_parameters = cpu_to_le32(0); 6457 nv->reset_delay = 5; 6458 nv->max_luns_per_target = cpu_to_le16(128); 6459 nv->port_down_retry_count = cpu_to_le16(30); 6460 nv->link_down_timeout = cpu_to_le16(30); 6461 6462 rval = 1; 6463 } 6464 6465 if (qla_tgt_mode_enabled(vha)) { 6466 /* Don't enable full login after initial LIP */ 6467 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6468 /* Don't enable LIP full login for initiator */ 6469 nv->host_p &= cpu_to_le32(~BIT_10); 6470 } 6471 6472 qlt_24xx_config_nvram_stage1(vha, nv); 6473 6474 /* Reset Initialization control block */ 6475 memset(icb, 0, ha->init_cb_size); 6476 6477 /* Copy 1st segment. */ 6478 dptr1 = (uint8_t *)icb; 6479 dptr2 = (uint8_t *)&nv->version; 6480 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 6481 while (cnt--) 6482 *dptr1++ = *dptr2++; 6483 6484 icb->login_retry_count = nv->login_retry_count; 6485 icb->link_down_on_nos = nv->link_down_on_nos; 6486 6487 /* Copy 2nd segment. */ 6488 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 6489 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 6490 cnt = (uint8_t *)&icb->reserved_3 - 6491 (uint8_t *)&icb->interrupt_delay_timer; 6492 while (cnt--) 6493 *dptr1++ = *dptr2++; 6494 6495 /* 6496 * Setup driver NVRAM options. 6497 */ 6498 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 6499 "QLA2462"); 6500 6501 qlt_24xx_config_nvram_stage2(vha, icb); 6502 6503 if (nv->host_p & cpu_to_le32(BIT_15)) { 6504 /* Use alternate WWN? 
*/ 6505 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 6506 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 6507 } 6508 6509 /* Prepare nodename */ 6510 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { 6511 /* 6512 * Firmware will apply the following mask if the nodename was 6513 * not provided. 6514 */ 6515 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 6516 icb->node_name[0] &= 0xF0; 6517 } 6518 6519 /* Set host adapter parameters. */ 6520 ha->flags.disable_risc_code_load = 0; 6521 ha->flags.enable_lip_reset = 0; 6522 ha->flags.enable_lip_full_login = 6523 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 6524 ha->flags.enable_target_reset = 6525 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 6526 ha->flags.enable_led_scheme = 0; 6527 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 6528 6529 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 6530 (BIT_6 | BIT_5 | BIT_4)) >> 4; 6531 6532 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 6533 sizeof(ha->fw_seriallink_options24)); 6534 6535 /* save HBA serial number */ 6536 ha->serial0 = icb->port_name[5]; 6537 ha->serial1 = icb->port_name[6]; 6538 ha->serial2 = icb->port_name[7]; 6539 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 6540 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 6541 6542 icb->execution_throttle = cpu_to_le16(0xFFFF); 6543 6544 ha->retry_count = le16_to_cpu(nv->login_retry_count); 6545 6546 /* Set minimum login_timeout to 4 seconds. */ 6547 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 6548 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 6549 if (le16_to_cpu(nv->login_timeout) < 4) 6550 nv->login_timeout = cpu_to_le16(4); 6551 ha->login_timeout = le16_to_cpu(nv->login_timeout); 6552 6553 /* Set minimum RATOV to 100 tenths of a second. 
*/ 6554 ha->r_a_tov = 100; 6555 6556 ha->loop_reset_delay = nv->reset_delay; 6557 6558 /* Link Down Timeout = 0: 6559 * 6560 * When Port Down timer expires we will start returning 6561 * I/O's to OS with "DID_NO_CONNECT". 6562 * 6563 * Link Down Timeout != 0: 6564 * 6565 * The driver waits for the link to come up after link down 6566 * before returning I/Os to OS with "DID_NO_CONNECT". 6567 */ 6568 if (le16_to_cpu(nv->link_down_timeout) == 0) { 6569 ha->loop_down_abort_time = 6570 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 6571 } else { 6572 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 6573 ha->loop_down_abort_time = 6574 (LOOP_DOWN_TIME - ha->link_down_timeout); 6575 } 6576 6577 /* Need enough time to try and get the port back. */ 6578 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 6579 if (qlport_down_retry) 6580 ha->port_down_retry_count = qlport_down_retry; 6581 6582 /* Set login_retry_count */ 6583 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 6584 if (ha->port_down_retry_count == 6585 le16_to_cpu(nv->port_down_retry_count) && 6586 ha->port_down_retry_count > 3) 6587 ha->login_retry_count = ha->port_down_retry_count; 6588 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 6589 ha->login_retry_count = ha->port_down_retry_count; 6590 if (ql2xloginretrycount) 6591 ha->login_retry_count = ql2xloginretrycount; 6592 6593 /* Enable ZIO. */ 6594 if (!vha->flags.init_done) { 6595 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 6596 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 6597 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	/* Clear the ZIO mode bits before (possibly) re-enabling below. */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		/* Program ZIO mode and interrupt-delay into the ICB. */
		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla27xx_find_valid_image() - select which flash firmware image to use.
 * @vha: SCSI host
 *
 * Reads the primary and secondary image-status regions from flash,
 * validates each by signature and a 32-bit additive checksum (which must
 * sum to zero), and records the chosen image in ha->active_image:
 * 0 (default boot-loader/fw), QLA27XX_PRIMARY_IMAGE or
 * QLA27XX_SECONDARY_IMAGE.  The secondary image wins when it is valid
 * and either no primary was chosen or its generation_number is newer
 * (primary's is lower).
 *
 * Returns ha->active_image.
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
	struct qla27xx_image_status pri_image_status, sec_image_status;
	uint8_t valid_pri_image, valid_sec_image;
	uint32_t *wptr;
	uint32_t cnt, chksum, size;
	struct qla_hw_data *ha = vha->hw;

	/* Assume both images valid until proven otherwise. */
	valid_pri_image = valid_sec_image = 1;
	ha->active_image = 0;
	/* Size of the image-status block in 32-bit words. */
	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

	if (!ha->flt_region_img_status_pri) {
		valid_pri_image = 0;
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
	    ha->flt_region_img_status_pri, size);

	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (0x%x) not valid\n",
		    pri_image_status.signature);
		valid_pri_image = 0;
		goto check_sec_image;
	}

	/* Additive checksum over the whole block must be zero. */
	wptr = (uint32_t *)(&pri_image_status);
	cnt = size;

	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Checksum validation failed for primary image (0x%x)\n",
		    chksum);
		valid_pri_image = 0;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		valid_sec_image = 0;
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, size);

	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image signature(0x%x) not valid\n",
		    sec_image_status.signature);
		valid_sec_image = 0;
		goto check_valid_image;
	}

	wptr = (uint32_t *)(&sec_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);
	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018e,
		    "Checksum validation failed for secondary image (0x%x)\n",
		    chksum);
		valid_sec_image = 0;
	}

check_valid_image:
	/* Bit 0 of image_status_mask marks the image as enabled. */
	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
		ha->active_image = QLA27XX_PRIMARY_IMAGE;
	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
		/* Prefer secondary if no primary or it has a newer generation. */
		if (!ha->active_image ||
		    pri_image_status.generation_number <
		    sec_image_status.generation_number)
			ha->active_image = QLA27XX_SECONDARY_IMAGE;
	}

	ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
	    ha->active_image == 0 ? "default bootld and fw" :
	    ha->active_image == 1 ? "primary" :
	    ha->active_image == 2 ? "secondary" :
	    "Invalid");

	return ha->active_image;
}

/*
 * qla24xx_load_risc_flash() - load RISC firmware segments from flash.
 * @vha: SCSI host
 * @srisc_addr: out - RISC address of the first segment loaded
 * @faddr: flash address of the firmware image
 *
 * Streams each firmware segment from flash through the request ring
 * buffer into RISC memory via load-RAM mailbox commands.  On ISP27xx
 * it additionally loads the firmware-dump template that follows the
 * code segments (falling back to the built-in default template).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval = QLA_SUCCESS;
	int segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	/* The request ring is reused as a DMA-able staging buffer. */
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;

	/* On 27xx, honor the flash image-selection logic. */
	if (IS_QLA27XX(ha) &&
	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
		faddr = ha->flt_region_fw_sec;

	/* Validate firmware image by checking version. */
	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(dcode[i]);
	/* All-ones or all-zero version words mean erased/blank flash. */
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		/* Read segment's load information. */
		qla24xx_read_flash_data(vha, dcode, faddr, 4);

		risc_addr = be32_to_cpu(dcode[2]);
		/* Remember the load address of the very first segment. */
		*srisc_addr = *srisc_addr == 0 ?
	    risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(dcode[3]);

		/* Transfer the segment in transfer-size fragments. */
		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "Loading risc segment@ risc addr %x "
			    "number of dwords 0x%x offset 0x%x.\n",
			    risc_addr, dlen, faddr);

			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			/* Byte-swap into the order the RISC expects. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	/* Only ISP27xx carries a fwdump template after the code segments. */
	if (!IS_QLA27XX(ha))
		return rval;

	/* Discard any previously-loaded template before replacing it. */
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	ql_dbg(ql_dbg_init, vha, 0x0161,
	    "Loading fwdump template from %x\n", faddr);
	qla24xx_read_flash_data(vha, dcode, faddr, 7);
	risc_size = be32_to_cpu(dcode[2]);
	ql_dbg(ql_dbg_init, vha, 0x0162,
	    "-> array size %x dwords\n", risc_size);
	/* 0 or all-ones size means no template present in flash. */
	if (risc_size == 0 || risc_size == ~0)
		goto default_template;

	dlen = (risc_size - 8) * sizeof(*dcode);
	ql_dbg(ql_dbg_init, vha, 0x0163,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		/* NOTE(review): message reports risc_size, not the dlen requested. */
		ql_log(ql_log_warn, vha, 0x0164,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto default_template;
	}

	/* Skip the 7-dword header read above; 8 dwords of overhead total. */
	faddr += 7;
	risc_size -= 8;
	dcode = ha->fw_dump_template;
	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
	for (i = 0; i < risc_size; i++)
		dcode[i] =
		    le32_to_cpu(dcode[i]);

	if (!qla27xx_fwdt_template_valid(dcode)) {
		ql_log(ql_log_warn, vha, 0x0165,
		    "Failed fwdump template validate\n");
		goto default_template;
	}

	dlen = qla27xx_fwdt_template_size(dcode);
	ql_dbg(ql_dbg_init, vha, 0x0166,
	    "-> template size %x bytes\n", dlen);
	/* The self-described size must fit in what was read from flash. */
	if (dlen > risc_size * sizeof(*dcode)) {
		ql_log(ql_log_warn, vha, 0x0167,
		    "Failed fwdump template exceeds array by %zx bytes\n",
		    (size_t)(dlen - risc_size * sizeof(*dcode)));
		goto default_template;
	}
	ha->fw_dump_template_len = dlen;
	return rval;

default_template:
	/* Fall back to the driver's built-in template. */
	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	dlen = qla27xx_fwdt_template_default_size();
	ql_dbg(ql_dbg_init, vha, 0x0169,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x016a,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto failed_template;
	}

	dcode = ha->fw_dump_template;
	risc_size = dlen / sizeof(*dcode);
	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
	/* Default template is stored big-endian; convert to host order. */
	for (i = 0; i < risc_size; i++)
		dcode[i] = be32_to_cpu(dcode[i]);

	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
		ql_log(ql_log_warn, vha, 0x016b,
		    "Failed fwdump template validate\n");
		goto failed_template;
	}

	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
	ql_dbg(ql_dbg_init, vha, 0x016c,
	    "-> template size %x bytes\n", dlen);
	ha->fw_dump_template_len = dlen;
	return rval;

failed_template:
	/* No usable template at all; continue without fwdump support. */
	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template
	    = NULL;
	ha->fw_dump_template_len = 0;
	return rval;
}

#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/*
 * qla2x00_load_risc() - load RISC firmware via request_firmware (ISP2x00).
 * @vha: SCSI host
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * Older (non-FWI2) path: the blob is an array of 16-bit words; segments
 * are described by the blob's segs[] table and streamed to the RISC in
 * transfer-size fragments via the request ring buffer.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Request ring doubles as the DMA staging buffer. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version words mean a corrupt/blank image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* Remember the load address of the first segment. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size.
		 */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		/* Transfer the segment in transfer-size fragments. */
		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Byte-swap into the order the RISC expects. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}

/*
 * qla24xx_load_risc_blob() - load RISC firmware via request_firmware
 * (FWI2-capable ISPs).
 * @vha: SCSI host
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * The blob is an array of 32-bit words with FA_RISC_CODE_SEGMENTS
 * self-describing segments.  On ISP27xx the fwdump template following
 * the code segments is also loaded, with fallback to the built-in
 * default template.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct fw_blob *blob;
	const uint32_t *fwcode;
	uint32_t fwclen;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob.
	 */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0090,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_warn, vha, 0x0091,
		    "Firmware images can be retrieved from: "
		    QLA_FW_URL ".\n");

		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0092,
	    "FW: Loading via request-firmware.\n");

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	/* Request ring doubles as the DMA staging buffer. */
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint32_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint32_t)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		return QLA_FUNCTION_FAILED;
	}
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version words mean a corrupt/blank image. */
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0094,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		risc_addr = be32_to_cpu(fwcode[2]);
		/* Remember the load address of the first segment. */
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(fwcode[3]);

		/* Validate firmware image size.
		 */
		fwclen += risc_size * sizeof(uint32_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0096,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			return QLA_FUNCTION_FAILED;
		}

		/* Transfer the segment in transfer-size fragments. */
		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "Loading risc segment@ risc addr %x "
			    "number of dwords 0x%x.\n", risc_addr, dlen);

			/* Byte-swap into the order the RISC expects. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	/* Only ISP27xx carries a fwdump template after the code segments. */
	if (!IS_QLA27XX(ha))
		return rval;

	/* Discard any previously-loaded template before replacing it. */
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	ql_dbg(ql_dbg_init, vha, 0x171,
	    "Loading fwdump template from %x\n",
	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
	risc_size = be32_to_cpu(fwcode[2]);
	ql_dbg(ql_dbg_init, vha, 0x172,
	    "-> array size %x dwords\n", risc_size);
	/* 0 or all-ones size means no template present in the blob. */
	if (risc_size == 0 || risc_size == ~0)
		goto default_template;

	dlen = (risc_size - 8) * sizeof(*fwcode);
	ql_dbg(ql_dbg_init, vha, 0x0173,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		/* NOTE(review): message reports risc_size, not the dlen requested. */
		ql_log(ql_log_warn, vha, 0x0174,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto default_template;
	}

	/* Skip the 7-dword header; 8 dwords of overhead total. */
	fwcode += 7;
	risc_size -= 8;
	dcode = ha->fw_dump_template;
	for (i = 0;
	    i < risc_size; i++)
		dcode[i] = le32_to_cpu(fwcode[i]);

	if (!qla27xx_fwdt_template_valid(dcode)) {
		ql_log(ql_log_warn, vha, 0x0175,
		    "Failed fwdump template validate\n");
		goto default_template;
	}

	dlen = qla27xx_fwdt_template_size(dcode);
	ql_dbg(ql_dbg_init, vha, 0x0176,
	    "-> template size %x bytes\n", dlen);
	/* The self-described size must fit in what was copied from the blob. */
	if (dlen > risc_size * sizeof(*fwcode)) {
		ql_log(ql_log_warn, vha, 0x0177,
		    "Failed fwdump template exceeds array by %zx bytes\n",
		    (size_t)(dlen - risc_size * sizeof(*fwcode)));
		goto default_template;
	}
	ha->fw_dump_template_len = dlen;
	return rval;

default_template:
	/* Fall back to the driver's built-in template. */
	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	dlen = qla27xx_fwdt_template_default_size();
	ql_dbg(ql_dbg_init, vha, 0x0179,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x017a,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto failed_template;
	}

	dcode = ha->fw_dump_template;
	risc_size = dlen / sizeof(*fwcode);
	fwcode = qla27xx_fwdt_template_default();
	/* Default template is stored big-endian; convert to host order. */
	for (i = 0; i < risc_size; i++)
		dcode[i] = be32_to_cpu(fwcode[i]);

	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
		ql_log(ql_log_warn, vha, 0x017b,
		    "Failed fwdump template validate\n");
		goto failed_template;
	}

	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
	ql_dbg(ql_dbg_init, vha, 0x017c,
	    "-> template size %x bytes\n", dlen);
	ha->fw_dump_template_len = dlen;
	return rval;

failed_template:
	/* No usable template at all; continue without fwdump support. */
	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;
	return rval;
}

/*
 * qla24xx_load_risc() - firmware-load dispatcher for ISP24xx-class HBAs.
 * @vha: SCSI host
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * ql2xfwloadbin == 1 delegates to the qla81xx (flash-first) policy;
 * otherwise the .bin blob is tried first with flash as fallback.
 */
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	if (ql2xfwloadbin == 1)
		return qla81xx_load_risc(vha, srisc_addr);

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr,
	    vha->hw->flt_region_fw);
}

/*
 * qla81xx_load_risc() - firmware-load dispatcher for ISP81xx-class HBAs.
 * @vha: SCSI host
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * Flash is preferred (unless ql2xfwloadbin == 2 forces blob-first);
 * the golden firmware region is the last-resort fallback.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/*
	 * FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- limited operation.
	 */
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (rval == QLA_SUCCESS)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	/* Without a golden-fw region there is no further fallback. */
	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval != QLA_SUCCESS)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
	/* Flag limited operation so the operational fw can be updated. */
	ha->flags.running_gold_fw = 1;
	return rval;
}

/*
 * qla2x00_try_to_stop_firmware() - best-effort firmware stop.
 * @vha: SCSI host
 *
 * Issues the stop-firmware mailbox command, retrying up to five times
 * with a chip reset/diag/setup cycle between attempts.  Bails out early
 * when the hardware is inapplicable (no FWI2, permanent PCI error, fw
 * never loaded or not started).
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	/* Timeouts and invalid-command responses are not retried. */
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}

/*
 * qla24xx_configure_vhba() - bring a virtual port (NPIV vport) online.
 * @vha: virtual SCSI host (must have a non-zero vp_idx)
 *
 * Waits for firmware readiness on the base port, issues a sync marker,
 * logs in to the fabric SNS server, and kicks off a loop resync on the
 * physical host.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED, or -EINVAL for the
 * physical (non-vport) host.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* Only meaningful for virtual ports. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	if (vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];
	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}

/* 84XX Support **************************************************************/

/* Global registry of shared 84xx chip-state objects, one per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

/*
 * qla84xx_get_chip() - find or create the shared 84xx chip state.
 * @vha: SCSI host
 *
 * Multiple functions on the same PCI bus share one chip-state object;
 * an existing entry is reference-counted via kref, otherwise a new one
 * is allocated and registered.  Returns the object, or NULL on
 * allocation failure.  Release with qla84xx_put_chip().
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip.
	 */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

/* kref release callback: unregister and free the shared chip state. */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

/* Drop this host's reference on the shared 84xx chip state, if any. */
void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

/*
 * qla84xx_init_chip() - verify the 84xx chip under the fw-update mutex.
 *
 * Returns QLA_SUCCESS only when verification succeeded AND the first
 * status word is zero; QLA_FUNCTION_FAILED otherwise.
 */
static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
	    QLA_SUCCESS;
}

/* 81XX Support **************************************************************/

/*
 * qla81xx_nvram_config() - read/validate NVRAM and build the init
 * control block for ISP81xx-class adapters.
 * @vha: SCSI host
 *
 * Caches VPD and NVRAM from flash, verifies the additive checksum and
 * "ISP " id, substitutes sane defaults on corruption, then populates
 * the ICB and a large set of driver/host parameters from it.
 *
 * Returns QLA_SUCCESS, or 1 when defaults had to be used (reported as
 * "NVRAM configuration failed" at the end).
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address.
	 */
	ha->nvram_size = sizeof(struct nvram_81xx);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
	    ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
	    ha->nvram_size);
	dptr = (uint32_t *)nv;
	/* Additive checksum over the whole NVRAM must be zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
	    || nv->id[3] != ' ' ||
	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
		    "version=0x%x.\n", chksum, nv->id[0],
		    le16_to_cpu(nv->nvram_version));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = 2048;
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		/* Placeholder WWPN/WWNN (vary only by port number). */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		/* Placeholder FCoE e-node MAC (varies only by port number). */
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		/* Non-zero rval reports "NVRAM configuration failed" later. */
		rval = 1;
	}

	/* T10-PI support requires an 8-byte-aligned frame payload size. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= ~7;

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ?
	    1: 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	/*
	 * NOTE(review): both branches below assign the same value; the
	 * first condition looks redundant with the second — confirm
	 * against qla24xx_nvram_config, which has the same shape.
	 */
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		/* Default interrupt-delay to 2 (x100us) if unset. */
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	/* Clear the ZIO mode bits before (possibly) re-enabling below. */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		icb->firmware_options_3 |= BIT_0;

	if (IS_QLA27XX(ha)) {
		icb->firmware_options_3 |= BIT_8;
		/*
		 * NOTE(review): ql_dbg() is called with a ql_log_* level
		 * constant, and id 0x0075 duplicates the ZIO message above
		 * — confirm intended id/level.
		 */
		ql_dbg(ql_log_info, vha, 0x0075,
		    "Enabling direct connection.\n");
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * qla82xx_restart_isp() - re-initialize an ISP82xx after reset.
 * @vha: SCSI host
 *
 * Re-initializes the rings, waits for firmware readiness, issues a
 * sync marker, re-enables interrupts, re-arms FCE/EFT tracing if
 * previously allocated, and aborts/restarts all child vports.
 *
 * Returns 0 on success, non-zero otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready.
			 */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm FCE tracing if a buffer was previously allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm EFT tracing likewise (failure is non-fatal). */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Walk vports under the slock; vref_count pins each vport
		 * while the lock is dropped around the abort call.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}

/*
 * qla81xx_update_fw_options() - apply driver firmware-option tweaks
 * (ISP81xx-class) and push them to the firmware.
 * @vha: SCSI host
 *
 * Adjusts ha->fw_options[] per module parameters (ql2xfwholdabts,
 * ql2xmvasynctoatio, ql2xetsenable), P2P mode and target/dual mode,
 * then issues qla2x00_set_fw_options().
 */
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2103,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (qla_tgt_mode_enabled(vha) ||
	    qla_dual_mode_enabled(vha)) {
		/* FW auto send SCSI status during */
		ha->fw_options[1] |= BIT_8;
		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;

		/* FW perform Exchange validation */
		ha->fw_options[2] |= BIT_4;
	} else {
		ha->fw_options[1] &= ~BIT_8;
		ha->fw_options[10] &= 0x00ff;

		ha->fw_options[2] &= ~BIT_4;
	}

	if (ql2xetsenable) {
		/* Enable ETS Burst. */
		/* NOTE(review): memset wipes all options set above — confirm intended. */
		memset(ha->fw_options, 0, sizeof(ha->fw_options));
		ha->fw_options[2] |= BIT_9;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e9,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	-1 (if not found)
 *
 * Context:
 *	 Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	/* No config table loaded or feature disabled: nothing to match. */
	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return -1;

	priority = -1;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	for (i = 0; i < entries; i++) {
		/*
		 * An entry matches when BOTH pids match (pid_match == 2)
		 * or BOTH WWNs match (wwn_match == 2); a wildcard field
		 * (all-ones pid / all-ones WWN) counts as a match.
		 */
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			/*
			 * INVALID_PORT_ID doubles as the 24-bit port-id
			 * mask here -- presumably 0xFFFFFF; confirm in
			 * qla_def.h.
			 */
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			if (wwn2 == (uint64_t)-1)	/* wildcard WWN */
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)	/* wildcard WWN */
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}

/*
 * qla24xx_update_fcport_fcp_prio
 *	Activates fcp priority for the logged in fc port
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcp = port structure pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int ret;
	int priority;
	uint16_t mb[5];

	/* Only logged-in target ports carry an FCP priority. */
	if (fcport->port_type != FCT_TARGET ||
	    fcport->loop_id == FC_NO_LOOP_ID)
		return QLA_FUNCTION_FAILED;

	priority = qla24xx_get_fcp_prio(vha, fcport);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;

	/* P3P parts: priority is consumed per-command; just cache it. */
	if (IS_P3P_TYPE(vha->hw)) {
		fcport->fcp_prio = priority & 0xf;
		return QLA_SUCCESS;
	}

	/* Other ISPs: push the priority to firmware via mailbox. */
	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
	if (ret == QLA_SUCCESS) {
		if (fcport->fcp_prio != priority)
			ql_dbg(ql_dbg_user, vha, 0x709e,
			    "Updated FCP_CMND priority - value=%d loop_id=%d "
			    "port_id=%02x%02x%02x.\n", priority,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		fcport->fcp_prio = priority & 0xf;
	} else
		ql_dbg(ql_dbg_user, vha, 0x704f,
		    "Unable to update FCP_CMND priority - ret=0x%x for "
		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	return ret;
}

/*
 * qla24xx_update_all_fcp_prio
 *	Activates fcp priority for all the logged in ports
 *
 * Input:
 *	ha = adapter block pointer.
8001 * 8002 * Return: 8003 * QLA_SUCCESS or QLA_FUNCTION_FAILED 8004 * 8005 * Context: 8006 * Kernel context. 8007 */ 8008 int 8009 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) 8010 { 8011 int ret; 8012 fc_port_t *fcport; 8013 8014 ret = QLA_FUNCTION_FAILED; 8015 /* We need to set priority for all logged in ports */ 8016 list_for_each_entry(fcport, &vha->vp_fcports, list) 8017 ret = qla24xx_update_fcport_fcp_prio(vha, fcport); 8018 8019 return ret; 8020 } 8021 8022 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, 8023 int vp_idx, bool startqp) 8024 { 8025 int rsp_id = 0; 8026 int req_id = 0; 8027 int i; 8028 struct qla_hw_data *ha = vha->hw; 8029 uint16_t qpair_id = 0; 8030 struct qla_qpair *qpair = NULL; 8031 struct qla_msix_entry *msix; 8032 8033 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { 8034 ql_log(ql_log_warn, vha, 0x00181, 8035 "FW/Driver is not multi-queue capable.\n"); 8036 return NULL; 8037 } 8038 8039 if (ql2xmqsupport || ql2xnvmeenable) { 8040 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 8041 if (qpair == NULL) { 8042 ql_log(ql_log_warn, vha, 0x0182, 8043 "Failed to allocate memory for queue pair.\n"); 8044 return NULL; 8045 } 8046 memset(qpair, 0, sizeof(struct qla_qpair)); 8047 8048 qpair->hw = vha->hw; 8049 qpair->vha = vha; 8050 qpair->qp_lock_ptr = &qpair->qp_lock; 8051 spin_lock_init(&qpair->qp_lock); 8052 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; 8053 8054 /* Assign available que pair id */ 8055 mutex_lock(&ha->mq_lock); 8056 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 8057 if (ha->num_qpairs >= ha->max_qpairs) { 8058 mutex_unlock(&ha->mq_lock); 8059 ql_log(ql_log_warn, vha, 0x0183, 8060 "No resources to create additional q pair.\n"); 8061 goto fail_qid_map; 8062 } 8063 ha->num_qpairs++; 8064 set_bit(qpair_id, ha->qpair_qid_map); 8065 ha->queue_pair_map[qpair_id] = qpair; 8066 qpair->id = qpair_id; 8067 qpair->vp_idx = vp_idx; 8068 qpair->fw_started = ha->flags.fw_started; 8069 INIT_LIST_HEAD(&qpair->hints_list); 8070 INIT_LIST_HEAD(&qpair->nvme_done_list); 8071 qpair->chip_reset = ha->base_qpair->chip_reset; 8072 qpair->enable_class_2 = ha->base_qpair->enable_class_2; 8073 qpair->enable_explicit_conf = 8074 ha->base_qpair->enable_explicit_conf; 8075 8076 for (i = 0; i < ha->msix_count; i++) { 8077 msix = &ha->msix_entries[i]; 8078 if (msix->in_use) 8079 continue; 8080 qpair->msix = msix; 8081 ql_dbg(ql_dbg_multiq, vha, 0xc00f, 8082 "Vector %x selected for qpair\n", msix->vector); 8083 break; 8084 } 8085 if (!qpair->msix) { 8086 ql_log(ql_log_warn, vha, 0x0184, 8087 "Out of MSI-X vectors!.\n"); 8088 goto fail_msix; 8089 } 8090 8091 qpair->msix->in_use = 1; 8092 list_add_tail(&qpair->qp_list_elem, &vha->qp_list); 8093 qpair->pdev = ha->pdev; 8094 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) 8095 qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 8096 8097 mutex_unlock(&ha->mq_lock); 8098 8099 /* Create response queue first */ 8100 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); 8101 if (!rsp_id) { 8102 ql_log(ql_log_warn, vha, 0x0185, 8103 "Failed to create response queue.\n"); 8104 goto fail_rsp; 8105 } 8106 8107 qpair->rsp = ha->rsp_q_map[rsp_id]; 8108 8109 /* Create request queue */ 8110 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, 8111 startqp); 8112 if (!req_id) { 8113 ql_log(ql_log_warn, vha, 0x0186, 8114 "Failed to create request queue.\n"); 8115 
goto fail_req; 8116 } 8117 8118 qpair->req = ha->req_q_map[req_id]; 8119 qpair->rsp->req = qpair->req; 8120 qpair->rsp->qpair = qpair; 8121 /* init qpair to this cpu. Will adjust at run time. */ 8122 qla_cpu_update(qpair, smp_processor_id()); 8123 8124 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 8125 if (ha->fw_attributes & BIT_4) 8126 qpair->difdix_supported = 1; 8127 } 8128 8129 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 8130 if (!qpair->srb_mempool) { 8131 ql_log(ql_log_warn, vha, 0xd036, 8132 "Failed to create srb mempool for qpair %d\n", 8133 qpair->id); 8134 goto fail_mempool; 8135 } 8136 8137 /* Mark as online */ 8138 qpair->online = 1; 8139 8140 if (!vha->flags.qpairs_available) 8141 vha->flags.qpairs_available = 1; 8142 8143 ql_dbg(ql_dbg_multiq, vha, 0xc00d, 8144 "Request/Response queue pair created, id %d\n", 8145 qpair->id); 8146 ql_dbg(ql_dbg_init, vha, 0x0187, 8147 "Request/Response queue pair created, id %d\n", 8148 qpair->id); 8149 } 8150 return qpair; 8151 8152 fail_mempool: 8153 fail_req: 8154 qla25xx_delete_rsp_que(vha, qpair->rsp); 8155 fail_rsp: 8156 mutex_lock(&ha->mq_lock); 8157 qpair->msix->in_use = 0; 8158 list_del(&qpair->qp_list_elem); 8159 if (list_empty(&vha->qp_list)) 8160 vha->flags.qpairs_available = 0; 8161 fail_msix: 8162 ha->queue_pair_map[qpair_id] = NULL; 8163 clear_bit(qpair_id, ha->qpair_qid_map); 8164 ha->num_qpairs--; 8165 mutex_unlock(&ha->mq_lock); 8166 fail_qid_map: 8167 kfree(qpair); 8168 return NULL; 8169 } 8170 8171 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) 8172 { 8173 int ret = QLA_FUNCTION_FAILED; 8174 struct qla_hw_data *ha = qpair->hw; 8175 8176 if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created) 8177 goto fail; 8178 8179 qpair->delete_in_progress = 1; 8180 while (atomic_read(&qpair->ref_count)) 8181 msleep(500); 8182 8183 ret = qla25xx_delete_req_que(vha, qpair->req); 8184 if (ret != QLA_SUCCESS) 8185 goto fail; 8186 ret = 
qla25xx_delete_rsp_que(vha, qpair->rsp); 8187 if (ret != QLA_SUCCESS) 8188 goto fail; 8189 8190 mutex_lock(&ha->mq_lock); 8191 ha->queue_pair_map[qpair->id] = NULL; 8192 clear_bit(qpair->id, ha->qpair_qid_map); 8193 ha->num_qpairs--; 8194 list_del(&qpair->qp_list_elem); 8195 if (list_empty(&vha->qp_list)) { 8196 vha->flags.qpairs_available = 0; 8197 vha->flags.qpairs_req_created = 0; 8198 vha->flags.qpairs_rsp_created = 0; 8199 } 8200 mempool_destroy(qpair->srb_mempool); 8201 kfree(qpair); 8202 mutex_unlock(&ha->mq_lock); 8203 8204 return QLA_SUCCESS; 8205 fail: 8206 return ret; 8207 } 8208