/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"
#include <linux/utsname.h>

static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
static int qla_async_rsnn_nn(scsi_qla_host_t *);

/**
 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
 * @vha: HA context
 * @arg: CT arguments
 *
 * Returns a pointer to the @vha's ms_iocb.
 */
void *
qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt;

	ms_pkt = (ms_iocb_entry_t *)arg->iocb;
	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));

	ms_pkt->entry_type = MS_IOCB_TYPE;
	ms_pkt->entry_count = 1;
	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
	ms_pkt->total_dsd_count = cpu_to_le16(2);
	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);

	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;

	put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;

	vha->qla_stats.control_requests++;

	return (ms_pkt);
}

/**
 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
 * @vha: HA context
 * @arg: CT arguments
 *
 * Returns a pointer to the @vha's ms_iocb.
 */
void *
qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
	struct qla_hw_data *ha = vha->hw;
	struct ct_entry_24xx *ct_pkt;

	ct_pkt = (struct ct_entry_24xx *)arg->iocb;
	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));

	ct_pkt->entry_type = CT_IOCB_TYPE;
	ct_pkt->entry_count = 1;
	ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);

	put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;

	put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
	ct_pkt->vp_index = vha->vp_idx;

	vha->qla_stats.control_requests++;

	return (ct_pkt);
}
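/*
 * Usage sketch (illustrative only, mirroring qla2x00_ga_nxt() below):
 * a caller fills a struct ct_arg with the shared ms_iocb/ct_sns buffers
 * and lets the isp_ops hook pick the right IOCB layout for the ISP:
 *
 *	struct ct_arg arg = {
 *		.iocb         = ha->ms_iocb,
 *		.req_dma      = ha->ct_sns_dma,
 *		.rsp_dma      = ha->ct_sns_dma,
 *		.req_size     = GA_NXT_REQ_SIZE,
 *		.rsp_size     = GA_NXT_RSP_SIZE,
 *		.nport_handle = NPH_SNS,
 *	};
 *	ms_iocb_entry_t *ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
 *
 * The field names come from the callers in this file; this is not a new
 * API, just a summary of how the two prep routines above are driven.
 */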

/**
 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
 * @p: CT request buffer
 * @cmd: GS command
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the initialized @ct_req.
 */
static inline struct ct_sns_req *
qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
{
	memset(p, 0, sizeof(struct ct_sns_pkt));

	p->p.req.header.revision = 0x01;
	p->p.req.header.gs_type = 0xFC;
	p->p.req.header.gs_subtype = 0x02;
	p->p.req.command = cpu_to_be16(cmd);
	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);

	return &p->p.req;
}

int
qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
    struct ct_sns_rsp *ct_rsp, const char *routine)
{
	int rval;
	uint16_t comp_status;
	struct qla_hw_data *ha = vha->hw;
	bool lid_is_sns = false;

	rval = QLA_FUNCTION_FAILED;
	if (ms_pkt->entry_status != 0) {
		ql_dbg(ql_dbg_disc, vha, 0x2031,
		    "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
		    routine, ms_pkt->entry_status, vha->d_id.b.domain,
		    vha->d_id.b.area, vha->d_id.b.al_pa);
	} else {
		if (IS_FWI2_CAPABLE(ha))
			comp_status = le16_to_cpu(
			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
		else
			comp_status = le16_to_cpu(ms_pkt->status);
		switch (comp_status) {
		case CS_COMPLETE:
		case CS_DATA_UNDERRUN:
		case CS_DATA_OVERRUN:		/* Overrun? */
			if (ct_rsp->header.response !=
			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
				    "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
				    routine, vha->d_id.b.domain,
				    vha->d_id.b.area, vha->d_id.b.al_pa,
				    comp_status, ct_rsp->header.response);
				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
				    0x2078, ct_rsp,
				    offsetof(typeof(*ct_rsp), rsp));
				rval = QLA_INVALID_COMMAND;
			} else
				rval = QLA_SUCCESS;
			break;
		case CS_PORT_LOGGED_OUT:
			if (IS_FWI2_CAPABLE(ha)) {
				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
				    NPH_SNS)
					lid_is_sns = true;
			} else {
				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
				    SIMPLE_NAME_SERVER)
					lid_is_sns = true;
			}
			if (lid_is_sns) {
				ql_dbg(ql_dbg_async, vha, 0x502b,
				    "%s failed, Name server has logged out",
				    routine);
				rval = QLA_NOT_LOGGED_IN;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			}
			break;
		case CS_TIMEOUT:
			rval = QLA_FUNCTION_TIMEOUT;
			fallthrough;
		default:
			ql_dbg(ql_dbg_disc, vha, 0x2033,
			    "%s failed, completion status (%x) on port_id: "
			    "%02x%02x%02x.\n", routine, comp_status,
			    vha->d_id.b.domain, vha->d_id.b.area,
			    vha->d_id.b.al_pa);
			break;
		}
	}
	return rval;
}

/**
 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 * @vha: HA context
 * @fcport: fcport entry to be updated
 *
 * Returns 0 on success.
199 */ 200 int 201 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) 202 { 203 int rval; 204 205 ms_iocb_entry_t *ms_pkt; 206 struct ct_sns_req *ct_req; 207 struct ct_sns_rsp *ct_rsp; 208 struct qla_hw_data *ha = vha->hw; 209 struct ct_arg arg; 210 211 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 212 return qla2x00_sns_ga_nxt(vha, fcport); 213 214 arg.iocb = ha->ms_iocb; 215 arg.req_dma = ha->ct_sns_dma; 216 arg.rsp_dma = ha->ct_sns_dma; 217 arg.req_size = GA_NXT_REQ_SIZE; 218 arg.rsp_size = GA_NXT_RSP_SIZE; 219 arg.nport_handle = NPH_SNS; 220 221 /* Issue GA_NXT */ 222 /* Prepare common MS IOCB */ 223 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 224 225 /* Prepare CT request */ 226 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, 227 GA_NXT_RSP_SIZE); 228 ct_rsp = &ha->ct_sns->p.rsp; 229 230 /* Prepare CT arguments -- port_id */ 231 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); 232 233 /* Execute MS IOCB */ 234 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 235 sizeof(ms_iocb_entry_t)); 236 if (rval != QLA_SUCCESS) { 237 /*EMPTY*/ 238 ql_dbg(ql_dbg_disc, vha, 0x2062, 239 "GA_NXT issue IOCB failed (%d).\n", rval); 240 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 241 QLA_SUCCESS) { 242 rval = QLA_FUNCTION_FAILED; 243 } else { 244 /* Populate fc_port_t entry. */ 245 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); 246 247 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, 248 WWN_SIZE); 249 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, 250 WWN_SIZE); 251 252 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? 253 FS_FC4TYPE_FCP : FC4_TYPE_OTHER; 254 255 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && 256 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 257 fcport->d_id.b.domain = 0xf0; 258 259 ql_dbg(ql_dbg_disc, vha, 0x2063, 260 "GA_NXT entry - nn %8phN pn %8phN " 261 "port_id=%02x%02x%02x.\n", 262 fcport->node_name, fcport->port_name, 263 fcport->d_id.b.domain, fcport->d_id.b.area, 264 fcport->d_id.b.al_pa); 265 } 266 267 return (rval); 268 } 269 270 static inline int 271 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) 272 { 273 return vha->hw->max_fibre_devices * 4 + 16; 274 } 275 276 /** 277 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. 278 * @vha: HA context 279 * @list: switch info entries to populate 280 * 281 * NOTE: Non-Nx_Ports are not requested. 282 * 283 * Returns 0 on success. 
284 */ 285 int 286 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) 287 { 288 int rval; 289 uint16_t i; 290 291 ms_iocb_entry_t *ms_pkt; 292 struct ct_sns_req *ct_req; 293 struct ct_sns_rsp *ct_rsp; 294 295 struct ct_sns_gid_pt_data *gid_data; 296 struct qla_hw_data *ha = vha->hw; 297 uint16_t gid_pt_rsp_size; 298 struct ct_arg arg; 299 300 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 301 return qla2x00_sns_gid_pt(vha, list); 302 303 gid_data = NULL; 304 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); 305 306 arg.iocb = ha->ms_iocb; 307 arg.req_dma = ha->ct_sns_dma; 308 arg.rsp_dma = ha->ct_sns_dma; 309 arg.req_size = GID_PT_REQ_SIZE; 310 arg.rsp_size = gid_pt_rsp_size; 311 arg.nport_handle = NPH_SNS; 312 313 /* Issue GID_PT */ 314 /* Prepare common MS IOCB */ 315 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 316 317 /* Prepare CT request */ 318 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); 319 ct_rsp = &ha->ct_sns->p.rsp; 320 321 /* Prepare CT arguments -- port_type */ 322 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; 323 324 /* Execute MS IOCB */ 325 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 326 sizeof(ms_iocb_entry_t)); 327 if (rval != QLA_SUCCESS) { 328 /*EMPTY*/ 329 ql_dbg(ql_dbg_disc, vha, 0x2055, 330 "GID_PT issue IOCB failed (%d).\n", rval); 331 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 332 QLA_SUCCESS) { 333 rval = QLA_FUNCTION_FAILED; 334 } else { 335 /* Set port IDs in switch info list. */ 336 for (i = 0; i < ha->max_fibre_devices; i++) { 337 gid_data = &ct_rsp->rsp.gid_pt.entries[i]; 338 list[i].d_id = be_to_port_id(gid_data->port_id); 339 memset(list[i].fabric_port_name, 0, WWN_SIZE); 340 list[i].fp_speed = PORT_SPEED_UNKNOWN; 341 342 /* Last one exit. */ 343 if (gid_data->control_byte & BIT_7) { 344 list[i].d_id.b.rsvd_1 = gid_data->control_byte; 345 break; 346 } 347 } 348 349 /* 350 * If we've used all available slots, then the switch is 351 * reporting back more devices than we can handle with this 352 * single call. Return a failed status, and let GA_NXT handle 353 * the overload. 354 */ 355 if (i == ha->max_fibre_devices) 356 rval = QLA_FUNCTION_FAILED; 357 } 358 359 return (rval); 360 } 361 362 /** 363 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. 364 * @vha: HA context 365 * @list: switch info entries to populate 366 * 367 * Returns 0 on success. 
368 */ 369 int 370 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) 371 { 372 int rval = QLA_SUCCESS; 373 uint16_t i; 374 375 ms_iocb_entry_t *ms_pkt; 376 struct ct_sns_req *ct_req; 377 struct ct_sns_rsp *ct_rsp; 378 struct qla_hw_data *ha = vha->hw; 379 struct ct_arg arg; 380 381 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 382 return qla2x00_sns_gpn_id(vha, list); 383 384 arg.iocb = ha->ms_iocb; 385 arg.req_dma = ha->ct_sns_dma; 386 arg.rsp_dma = ha->ct_sns_dma; 387 arg.req_size = GPN_ID_REQ_SIZE; 388 arg.rsp_size = GPN_ID_RSP_SIZE; 389 arg.nport_handle = NPH_SNS; 390 391 for (i = 0; i < ha->max_fibre_devices; i++) { 392 /* Issue GPN_ID */ 393 /* Prepare common MS IOCB */ 394 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 395 396 /* Prepare CT request */ 397 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, 398 GPN_ID_RSP_SIZE); 399 ct_rsp = &ha->ct_sns->p.rsp; 400 401 /* Prepare CT arguments -- port_id */ 402 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 403 404 /* Execute MS IOCB */ 405 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 406 sizeof(ms_iocb_entry_t)); 407 if (rval != QLA_SUCCESS) { 408 /*EMPTY*/ 409 ql_dbg(ql_dbg_disc, vha, 0x2056, 410 "GPN_ID issue IOCB failed (%d).\n", rval); 411 break; 412 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 413 "GPN_ID") != QLA_SUCCESS) { 414 rval = QLA_FUNCTION_FAILED; 415 break; 416 } else { 417 /* Save portname */ 418 memcpy(list[i].port_name, 419 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 420 } 421 422 /* Last device exit. */ 423 if (list[i].d_id.b.rsvd_1 != 0) 424 break; 425 } 426 427 return (rval); 428 } 429 430 /** 431 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. 432 * @vha: HA context 433 * @list: switch info entries to populate 434 * 435 * Returns 0 on success. 436 */ 437 int 438 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) 439 { 440 int rval = QLA_SUCCESS; 441 uint16_t i; 442 struct qla_hw_data *ha = vha->hw; 443 ms_iocb_entry_t *ms_pkt; 444 struct ct_sns_req *ct_req; 445 struct ct_sns_rsp *ct_rsp; 446 struct ct_arg arg; 447 448 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 449 return qla2x00_sns_gnn_id(vha, list); 450 451 arg.iocb = ha->ms_iocb; 452 arg.req_dma = ha->ct_sns_dma; 453 arg.rsp_dma = ha->ct_sns_dma; 454 arg.req_size = GNN_ID_REQ_SIZE; 455 arg.rsp_size = GNN_ID_RSP_SIZE; 456 arg.nport_handle = NPH_SNS; 457 458 for (i = 0; i < ha->max_fibre_devices; i++) { 459 /* Issue GNN_ID */ 460 /* Prepare common MS IOCB */ 461 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 462 463 /* Prepare CT request */ 464 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, 465 GNN_ID_RSP_SIZE); 466 ct_rsp = &ha->ct_sns->p.rsp; 467 468 /* Prepare CT arguments -- port_id */ 469 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 470 471 /* Execute MS IOCB */ 472 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 473 sizeof(ms_iocb_entry_t)); 474 if (rval != QLA_SUCCESS) { 475 /*EMPTY*/ 476 ql_dbg(ql_dbg_disc, vha, 0x2057, 477 "GNN_ID issue IOCB failed (%d).\n", rval); 478 break; 479 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 480 "GNN_ID") != QLA_SUCCESS) { 481 rval = QLA_FUNCTION_FAILED; 482 break; 483 } else { 484 /* Save nodename */ 485 memcpy(list[i].node_name, 486 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 487 488 ql_dbg(ql_dbg_disc, vha, 0x2058, 489 "GID_PT entry - nn %8phN pn %8phN " 490 "portid=%02x%02x%02x.\n", 491 list[i].node_name, list[i].port_name, 492 list[i].d_id.b.domain, list[i].d_id.b.area, 493 list[i].d_id.b.al_pa); 494 } 495 496 /* Last device exit. 
*/ 497 if (list[i].d_id.b.rsvd_1 != 0) 498 break; 499 } 500 501 return (rval); 502 } 503 504 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) 505 { 506 struct scsi_qla_host *vha = sp->vha; 507 struct ct_sns_pkt *ct_sns; 508 struct qla_work_evt *e; 509 510 sp->rc = rc; 511 if (rc == QLA_SUCCESS) { 512 ql_dbg(ql_dbg_disc, vha, 0x204f, 513 "Async done-%s exiting normally.\n", 514 sp->name); 515 } else if (rc == QLA_FUNCTION_TIMEOUT) { 516 ql_dbg(ql_dbg_disc, vha, 0x204f, 517 "Async done-%s timeout\n", sp->name); 518 } else { 519 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 520 memset(ct_sns, 0, sizeof(*ct_sns)); 521 sp->retry_count++; 522 if (sp->retry_count > 3) 523 goto err; 524 525 ql_dbg(ql_dbg_disc, vha, 0x204f, 526 "Async done-%s fail rc %x. Retry count %d\n", 527 sp->name, rc, sp->retry_count); 528 529 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY); 530 if (!e) 531 goto err2; 532 533 del_timer(&sp->u.iocb_cmd.timer); 534 e->u.iosb.sp = sp; 535 qla2x00_post_work(vha, e); 536 return; 537 } 538 539 err: 540 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 541 err2: 542 if (!e) { 543 /* please ignore kernel warning. otherwise, we have mem leak. */ 544 if (sp->u.iocb_cmd.u.ctarg.req) { 545 dma_free_coherent(&vha->hw->pdev->dev, 546 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 547 sp->u.iocb_cmd.u.ctarg.req, 548 sp->u.iocb_cmd.u.ctarg.req_dma); 549 sp->u.iocb_cmd.u.ctarg.req = NULL; 550 } 551 552 if (sp->u.iocb_cmd.u.ctarg.rsp) { 553 dma_free_coherent(&vha->hw->pdev->dev, 554 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 555 sp->u.iocb_cmd.u.ctarg.rsp, 556 sp->u.iocb_cmd.u.ctarg.rsp_dma); 557 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 558 } 559 560 sp->free(sp); 561 562 return; 563 } 564 565 e->u.iosb.sp = sp; 566 qla2x00_post_work(vha, e); 567 } 568 569 /** 570 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. 571 * @vha: HA context 572 * 573 * Returns 0 on success. 
574 */ 575 int 576 qla2x00_rft_id(scsi_qla_host_t *vha) 577 { 578 struct qla_hw_data *ha = vha->hw; 579 580 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 581 return qla2x00_sns_rft_id(vha); 582 583 return qla_async_rftid(vha, &vha->d_id); 584 } 585 586 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) 587 { 588 int rval = QLA_MEMORY_ALLOC_FAILED; 589 struct ct_sns_req *ct_req; 590 srb_t *sp; 591 struct ct_sns_pkt *ct_sns; 592 593 if (!vha->flags.online) 594 goto done; 595 596 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 597 if (!sp) 598 goto done; 599 600 sp->type = SRB_CT_PTHRU_CMD; 601 sp->name = "rft_id"; 602 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 603 604 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 605 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 606 GFP_KERNEL); 607 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 608 if (!sp->u.iocb_cmd.u.ctarg.req) { 609 ql_log(ql_log_warn, vha, 0xd041, 610 "%s: Failed to allocate ct_sns request.\n", 611 __func__); 612 goto done_free_sp; 613 } 614 615 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 616 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 617 GFP_KERNEL); 618 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 619 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 620 ql_log(ql_log_warn, vha, 0xd042, 621 "%s: Failed to allocate ct_sns request.\n", 622 __func__); 623 goto done_free_sp; 624 } 625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 626 memset(ct_sns, 0, sizeof(*ct_sns)); 627 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 628 629 /* Prepare CT request */ 630 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); 631 632 /* Prepare CT arguments -- port_id, FC-4 types */ 633 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); 634 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 635 636 if (vha->flags.nvme_enabled) 637 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ 638 639 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; 640 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; 641 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 642 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 643 sp->done = qla2x00_async_sns_sp_done; 644 645 ql_dbg(ql_dbg_disc, vha, 0xffff, 646 "Async-%s - hdl=%x portid %06x.\n", 647 sp->name, sp->handle, d_id->b24); 648 649 rval = qla2x00_start_sp(sp); 650 if (rval != QLA_SUCCESS) { 651 ql_dbg(ql_dbg_disc, vha, 0x2043, 652 "RFT_ID issue IOCB failed (%d).\n", rval); 653 goto done_free_sp; 654 } 655 return rval; 656 done_free_sp: 657 sp->free(sp); 658 done: 659 return rval; 660 } 661 662 /** 663 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. 664 * @vha: HA context 665 * @type: not used 666 * 667 * Returns 0 on success. 
668 */ 669 int 670 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) 671 { 672 struct qla_hw_data *ha = vha->hw; 673 674 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 675 ql_dbg(ql_dbg_disc, vha, 0x2046, 676 "RFF_ID call not supported on ISP2100/ISP2200.\n"); 677 return (QLA_SUCCESS); 678 } 679 680 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), 681 FC4_TYPE_FCP_SCSI); 682 } 683 684 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, 685 u8 fc4feature, u8 fc4type) 686 { 687 int rval = QLA_MEMORY_ALLOC_FAILED; 688 struct ct_sns_req *ct_req; 689 srb_t *sp; 690 struct ct_sns_pkt *ct_sns; 691 692 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 693 if (!sp) 694 goto done; 695 696 sp->type = SRB_CT_PTHRU_CMD; 697 sp->name = "rff_id"; 698 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 699 700 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 701 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 702 GFP_KERNEL); 703 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 704 if (!sp->u.iocb_cmd.u.ctarg.req) { 705 ql_log(ql_log_warn, vha, 0xd041, 706 "%s: Failed to allocate ct_sns request.\n", 707 __func__); 708 goto done_free_sp; 709 } 710 711 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 712 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 713 GFP_KERNEL); 714 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 715 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 716 ql_log(ql_log_warn, vha, 0xd042, 717 "%s: Failed to allocate ct_sns request.\n", 718 __func__); 719 goto done_free_sp; 720 } 721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 722 memset(ct_sns, 0, sizeof(*ct_sns)); 723 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 724 725 /* Prepare CT request */ 726 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); 727 728 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 729 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); 730 ct_req->req.rff_id.fc4_feature = fc4feature; 731 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ 732 733 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; 734 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; 735 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 736 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 737 sp->done = qla2x00_async_sns_sp_done; 738 739 ql_dbg(ql_dbg_disc, vha, 0xffff, 740 "Async-%s - hdl=%x portid %06x feature %x type %x.\n", 741 sp->name, sp->handle, d_id->b24, fc4feature, fc4type); 742 743 rval = qla2x00_start_sp(sp); 744 if (rval != QLA_SUCCESS) { 745 ql_dbg(ql_dbg_disc, vha, 0x2047, 746 "RFF_ID issue IOCB failed (%d).\n", rval); 747 goto done_free_sp; 748 } 749 750 return rval; 751 752 done_free_sp: 753 sp->free(sp); 754 done: 755 return rval; 756 } 757 758 /** 759 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. 760 * @vha: HA context 761 * 762 * Returns 0 on success. 
763 */ 764 int 765 qla2x00_rnn_id(scsi_qla_host_t *vha) 766 { 767 struct qla_hw_data *ha = vha->hw; 768 769 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 770 return qla2x00_sns_rnn_id(vha); 771 772 return qla_async_rnnid(vha, &vha->d_id, vha->node_name); 773 } 774 775 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, 776 u8 *node_name) 777 { 778 int rval = QLA_MEMORY_ALLOC_FAILED; 779 struct ct_sns_req *ct_req; 780 srb_t *sp; 781 struct ct_sns_pkt *ct_sns; 782 783 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 784 if (!sp) 785 goto done; 786 787 sp->type = SRB_CT_PTHRU_CMD; 788 sp->name = "rnid"; 789 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 790 791 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 792 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 793 GFP_KERNEL); 794 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 795 if (!sp->u.iocb_cmd.u.ctarg.req) { 796 ql_log(ql_log_warn, vha, 0xd041, 797 "%s: Failed to allocate ct_sns request.\n", 798 __func__); 799 goto done_free_sp; 800 } 801 802 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 803 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 804 GFP_KERNEL); 805 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 806 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 807 ql_log(ql_log_warn, vha, 0xd042, 808 "%s: Failed to allocate ct_sns request.\n", 809 __func__); 810 goto done_free_sp; 811 } 812 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 813 memset(ct_sns, 0, sizeof(*ct_sns)); 814 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 815 816 /* Prepare CT request */ 817 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); 818 819 /* Prepare CT arguments -- port_id, node_name */ 820 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); 821 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); 822 823 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; 824 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; 825 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 826 827 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 828 sp->done = qla2x00_async_sns_sp_done; 829 830 ql_dbg(ql_dbg_disc, vha, 0xffff, 831 "Async-%s - hdl=%x portid %06x\n", 832 sp->name, sp->handle, d_id->b24); 833 834 rval = qla2x00_start_sp(sp); 835 if (rval != QLA_SUCCESS) { 836 ql_dbg(ql_dbg_disc, vha, 0x204d, 837 "RNN_ID issue IOCB failed (%d).\n", rval); 838 goto done_free_sp; 839 } 840 841 return rval; 842 843 done_free_sp: 844 sp->free(sp); 845 done: 846 return rval; 847 } 848 849 size_t 850 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) 851 { 852 struct qla_hw_data *ha = vha->hw; 853 854 if (IS_QLAFX00(ha)) 855 return scnprintf(snn, size, "%s FW:v%s DVR:v%s", 856 ha->model_number, ha->mr.fw_version, qla2x00_version_str); 857 858 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", 859 ha->model_number, ha->fw_major_version, ha->fw_minor_version, 860 ha->fw_subminor_version, qla2x00_version_str); 861 } 862 863 /** 864 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. 865 * @vha: HA context 866 * 867 * Returns 0 on success. 
 */
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_disc, vha, 0x2050,
		    "RSNN_NN call unsupported on ISP2100/ISP2200.\n");
		return (QLA_SUCCESS);
	}

	return qla_async_rsnn_nn(vha);
}

static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;
	struct ct_sns_pkt *ct_sns;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "rsnn_nn";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
	    GFP_KERNEL);
	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
	if (!sp->u.iocb_cmd.u.ctarg.req) {
		ql_log(ql_log_warn, vha, 0xd041,
		    "%s: Failed to allocate ct_sns request.\n",
		    __func__);
		goto done_free_sp;
	}

	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
	    GFP_KERNEL);
	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xd042,
		    "%s: Failed to allocate ct_sns request.\n",
		    __func__);
		goto done_free_sp;
	}
	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
	memset(ct_sns, 0, sizeof(*ct_sns));
	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;

	/* Prepare CT request */
	ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);

	/* Prepare CT arguments -- node_name, symbolic node_name, size */
	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);

	/* Prepare the Symbolic Node Name */
	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
	ct_req->req.rsnn_nn.name_len =
	    (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);

	sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
	sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_sns_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - hdl=%x.\n",
	    sp->name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2043,
		    "RSNN_NN issue IOCB failed (%d).\n", rval);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
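/*
 * Note on the RSNN_NN request length above: the CT payload is the 16-byte
 * CT_IU preamble, the 8-byte node name, a 1-byte symbolic-name length and
 * then the symbolic name itself, which is where "24 + 1 + name_len" comes
 * from. This is a reading of the code above, not a separate definition
 * from the firmware interface headers.
 */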

/**
 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
 * @vha: HA context
 * @cmd: GS command
 * @scmd_len: Subcommand length
 * @data_size: response size in bytes
 *
 * Returns a pointer to the @ha's sns_cmd.
 */
static inline struct sns_cmd_pkt *
qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
    uint16_t data_size)
{
	uint16_t wc;
	struct sns_cmd_pkt *sns_cmd;
	struct qla_hw_data *ha = vha->hw;

	sns_cmd = ha->sns_cmd;
	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
	wc = data_size / 2;		/* Size in 16bit words. */
	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
	wc = (data_size - 16) / 4;	/* Size in 32bit words. */
	sns_cmd->p.cmd.size = cpu_to_le16(wc);

	vha->qla_stats.control_requests++;

	return (sns_cmd);
}

/**
 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 * @vha: HA context
 * @fcport: fcport entry to be updated
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	struct sns_cmd_pkt *sns_cmd;

	/* Issue GA_NXT. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
	    GA_NXT_SNS_DATA_SIZE);

	/* Prepare SNS command arguments -- port_id. */
	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x205f,
		    "GA_NXT Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
	    sns_cmd->p.gan_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
		    sns_cmd->p.gan_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Populate fc_port_t entry. */
		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];

		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);

		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
			fcport->d_id.b.domain = 0xf0;

		ql_dbg(ql_dbg_disc, vha, 0x2061,
		    "GA_NXT entry - nn %8phN pn %8phN "
		    "port_id=%02x%02x%02x.\n",
		    fcport->node_name, fcport->port_name,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}

	return (rval);
}
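/*
 * A short map of the raw GID_PT payload parsed below (taken from how the
 * code indexes gid_data): bytes 8-9 carry the CT accept code (0x8002),
 * each port entry is 4 bytes starting at offset 16 (a control byte
 * followed by the 3-byte port ID), and BIT_7 in the control byte marks
 * the final entry in the list.
 */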

/**
 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * NOTE: Non-Nx_Ports are not requested.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	uint8_t *entry;
	struct sns_cmd_pkt *sns_cmd;
	uint16_t gid_pt_sns_data_size;

	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);

	/* Issue GID_PT. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
	    gid_pt_sns_data_size);

	/* Prepare SNS command arguments -- port_type. */
	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x206d,
		    "GID_PT Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
	    sns_cmd->p.gid_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc, vha, 0x202f,
		    "GID_PT failed, rejected request, gid_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
		    sns_cmd->p.gid_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Set port IDs in switch info list. */
		for (i = 0; i < ha->max_fibre_devices; i++) {
			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
			list[i].d_id.b.domain = entry[1];
			list[i].d_id.b.area = entry[2];
			list[i].d_id.b.al_pa = entry[3];

			/* Last one exit. */
			if (entry[0] & BIT_7) {
				list[i].d_id.b.rsvd_1 = entry[0];
				break;
			}
		}

		/*
		 * If we've used all available slots, then the switch is
		 * reporting back more devices than we can handle with this
		 * single call. Return a failed status, and let GA_NXT handle
		 * the overload.
		 */
		if (i == ha->max_fibre_devices)
			rval = QLA_FUNCTION_FAILED;
	}

	return (rval);
}

/**
 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	struct sns_cmd_pkt *sns_cmd;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		/* Issue GPN_ID */
		/* Prepare SNS command request. */
		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);

		/* Prepare SNS command arguments -- port_id. */
		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;

		/* Execute SNS command. */
		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
		if (rval != QLA_SUCCESS) {
			/*EMPTY*/
			ql_dbg(ql_dbg_disc, vha, 0x2032,
			    "GPN_ID Send SNS failed (%d).\n", rval);
		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
		    sns_cmd->p.gpn_data[9] != 0x02) {
			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
			    "GPN_ID failed, rejected request, gpn_rsp:\n");
			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
			    sns_cmd->p.gpn_data, 16);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Save portname */
			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
			    WWN_SIZE);
		}

		/* Last device exit. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}

	return (rval);
}
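/*
 * The rsvd_1 check that ends the loops above and below is how these
 * routines remember where the GID_PT list stopped: the control byte of
 * the final GID_PT entry (with BIT_7 set) is stashed in d_id.b.rsvd_1,
 * so the per-port GPN_ID/GNN_ID sweeps stop at the same place instead of
 * walking all max_fibre_devices slots.
 */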

/**
 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	struct sns_cmd_pkt *sns_cmd;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		/* Issue GNN_ID */
		/* Prepare SNS command request. */
		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);

		/* Prepare SNS command arguments -- port_id. */
		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;

		/* Execute SNS command. */
		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
		if (rval != QLA_SUCCESS) {
			/*EMPTY*/
			ql_dbg(ql_dbg_disc, vha, 0x203f,
			    "GNN_ID Send SNS failed (%d).\n", rval);
		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
		    sns_cmd->p.gnn_data[9] != 0x02) {
			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
			    "GNN_ID failed, rejected request, gnn_rsp:\n");
			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
			    sns_cmd->p.gnn_data, 16);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Save nodename */
			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
			    WWN_SIZE);

			ql_dbg(ql_dbg_disc, vha, 0x206e,
			    "GID_PT entry - nn %8phN pn %8phN "
			    "port_id=%02x%02x%02x.\n",
			    list[i].node_name, list[i].port_name,
			    list[i].d_id.b.domain, list[i].d_id.b.area,
			    list[i].d_id.b.al_pa);
		}

		/* Last device exit. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}

	return (rval);
}

/**
 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
 * @vha: HA context
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_rft_id(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct sns_cmd_pkt *sns_cmd;

	/* Issue RFT_ID. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
	    RFT_ID_SNS_DATA_SIZE);

	/* Prepare SNS command arguments -- port_id, FC-4 types */
	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;

	sns_cmd->p.cmd.param[5] = 0x01;		/* FCP-3 */

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x2060,
		    "RFT_ID Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
	    sns_cmd->p.rft_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
		    "RFT_ID failed, rejected request rft_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
		    sns_cmd->p.rft_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2073,
		    "RFT_ID exiting normally.\n");
	}

	return (rval);
}

/**
 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
 * @vha: HA context
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct sns_cmd_pkt *sns_cmd;

	/* Issue RNN_ID. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
	    RNN_ID_SNS_DATA_SIZE);

	/* Prepare SNS command arguments -- port_id, nodename. */
	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;

	sns_cmd->p.cmd.param[4] = vha->node_name[7];
	sns_cmd->p.cmd.param[5] = vha->node_name[6];
	sns_cmd->p.cmd.param[6] = vha->node_name[5];
	sns_cmd->p.cmd.param[7] = vha->node_name[4];
	sns_cmd->p.cmd.param[8] = vha->node_name[3];
	sns_cmd->p.cmd.param[9] = vha->node_name[2];
	sns_cmd->p.cmd.param[10] = vha->node_name[1];
	sns_cmd->p.cmd.param[11] = vha->node_name[0];

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x204a,
		    "RNN_ID Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
	    sns_cmd->p.rnn_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
		    "RNN_ID failed, rejected request, rnn_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
		    sns_cmd->p.rnn_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x204c,
		    "RNN_ID exiting normally.\n");
	}

	return (rval);
}

/**
 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
	int ret, rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	ret = QLA_SUCCESS;
	if (vha->flags.management_server_logged_in)
		return ret;

	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
	    0xfa, mb, BIT_1);
	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_disc, vha, 0x2085,
			    "Failed management_server login: loopid=%x "
			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
		else
			ql_dbg(ql_dbg_disc, vha, 0x2024,
			    "Failed management_server login: loopid=%x "
			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
			    mb[7]);
		ret = QLA_FUNCTION_FAILED;
	} else
		vha->flags.management_server_logged_in = 1;

	return ret;
}

/**
 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 * @vha: HA context
 * @req_size: request size in bytes
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the @ha's ms_iocb.
 */
void *
qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
    uint32_t rsp_size)
{
	ms_iocb_entry_t *ms_pkt;
	struct qla_hw_data *ha = vha->hw;

	ms_pkt = ha->ms_iocb;
	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));

	ms_pkt->entry_type = MS_IOCB_TYPE;
	ms_pkt->entry_count = 1;
	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
	ms_pkt->total_dsd_count = cpu_to_le16(2);
	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
	ms_pkt->req_bytecount = cpu_to_le32(req_size);

	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;

	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;

	return ms_pkt;
}

/**
 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 * @vha: HA context
 * @req_size: request size in bytes
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the @ha's ms_iocb.
 */
void *
qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
    uint32_t rsp_size)
{
	struct ct_entry_24xx *ct_pkt;
	struct qla_hw_data *ha = vha->hw;

	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));

	ct_pkt->entry_type = CT_IOCB_TYPE;
	ct_pkt->entry_count = 1;
	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);

	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;

	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
	ct_pkt->vp_index = vha->vp_idx;

	return ct_pkt;
}

static void
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
{
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;

	if (IS_FWI2_CAPABLE(ha)) {
		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
	} else {
		ms_pkt->req_bytecount = cpu_to_le32(req_size);
		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
	}
}
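/*
 * The FDMI request built below differs from the SNS request built by
 * qla2x00_prep_ct_req() only in the CT header routing: gs_type 0xFC with
 * subtype 0x02 addresses the fabric name server, while gs_type 0xFA with
 * subtype 0x10 addresses the management server's FDMI service. The values
 * come straight from the two prep routines in this file; the service
 * names are the usual FC-GS reading of those codes.
 */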

/**
 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
 * @p: CT request buffer
 * @cmd: GS command
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the initialized @ct_req.
 */
static inline struct ct_sns_req *
qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
    uint16_t rsp_size)
{
	memset(p, 0, sizeof(struct ct_sns_pkt));

	p->p.req.header.revision = 0x01;
	p->p.req.header.gs_type = 0xFA;
	p->p.req.header.gs_subtype = 0x10;
	p->p.req.command = cpu_to_be16(cmd);
	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);

	return &p->p.req;
}

static uint
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
{
	uint speeds = 0;

	if (IS_CNA_CAPABLE(ha))
		return FDMI_PORT_SPEED_10GB;
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		if (ha->max_supported_speed == 2) {
			if (ha->min_supported_speed <= 6)
				speeds |= FDMI_PORT_SPEED_64GB;
		}
		if (ha->max_supported_speed == 2 ||
		    ha->max_supported_speed == 1) {
			if (ha->min_supported_speed <= 5)
				speeds |= FDMI_PORT_SPEED_32GB;
		}
		if (ha->max_supported_speed == 2 ||
		    ha->max_supported_speed == 1 ||
		    ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 4)
				speeds |= FDMI_PORT_SPEED_16GB;
		}
		if (ha->max_supported_speed == 1 ||
		    ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 3)
				speeds |= FDMI_PORT_SPEED_8GB;
		}
		if (ha->max_supported_speed == 0) {
			if (ha->min_supported_speed <= 2)
				speeds |= FDMI_PORT_SPEED_4GB;
		}
		return speeds;
	}
	if (IS_QLA2031(ha)) {
		if ((ha->pdev->subsystem_vendor == 0x103C) &&
		    (ha->pdev->subsystem_device == 0x8002)) {
			speeds = FDMI_PORT_SPEED_16GB;
		} else {
			speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
			    FDMI_PORT_SPEED_4GB;
		}
		return speeds;
	}
	if (IS_QLA25XX(ha))
		return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
		    FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
	if (IS_QLA24XX_TYPE(ha))
		return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
		    FDMI_PORT_SPEED_1GB;
	if (IS_QLA23XX(ha))
		return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
	return FDMI_PORT_SPEED_1GB;
}

static uint
qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
{
	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB:
		return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB:
		return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB:
		return FDMI_PORT_SPEED_8GB;
	case PORT_SPEED_10GB:
		return FDMI_PORT_SPEED_10GB;
	case PORT_SPEED_16GB:
		return FDMI_PORT_SPEED_16GB;
	case PORT_SPEED_32GB:
		return FDMI_PORT_SPEED_32GB;
	case PORT_SPEED_64GB:
		return FDMI_PORT_SPEED_64GB;
	default:
		return FDMI_PORT_SPEED_UNKNOWN;
	}
}
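/*
 * Each attribute built by qla2x00_hba_attributes() and
 * qla2x00_port_attributes() below is a simple TLV: a 16-bit attribute
 * type, a 16-bit length and the value itself. The running "alen"
 * computation first takes the value length, then (for strings) rounds it
 * up via FDMI_ATTR_ALIGNMENT() and finally adds the type/length header
 * via FDMI_ATTR_TYPELEN(), so eiter->len is the full on-the-wire size of
 * the entry. The macro behaviour described here is inferred from how the
 * macros are used; their definitions live outside this file.
 */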

/**
 * qla2x00_hba_attributes() - perform HBA attributes registration
 * @vha: HA context
 * @entries: buffer in which the attribute entries are built
 * @callopt: Option to issue extended or standard FDMI
 *           command parameter
 *
 * Returns the size of the built attribute block.
 */
static unsigned long
qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
    unsigned int callopt)
{
	struct qla_hw_data *ha = vha->hw;
	struct init_cb_24xx *icb24 = (void *)ha->init_cb;
	struct new_utsname *p_sysid = utsname();
	struct ct_fdmi_hba_attr *eiter;
	uint16_t alen;
	unsigned long size = 0;

	/* Nodename. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
	alen = sizeof(eiter->a.node_name);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a0,
	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
	/* Manufacturer. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
	alen = scnprintf(
	    eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
	    "%s", "QLogic Corporation");
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a1,
	    "MANUFACTURER = %s.\n", eiter->a.manufacturer);
	/* Serial number. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
	alen = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		alen = qla2xxx_get_vpd_field(vha, "SN",
		    eiter->a.serial_num, sizeof(eiter->a.serial_num));
	}
	if (!alen) {
		uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
		    (ha->serial2 << 8) | ha->serial1;
		alen = scnprintf(
		    eiter->a.serial_num, sizeof(eiter->a.serial_num),
		    "%c%05d", 'A' + sn / 100000, sn % 100000);
	}
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a2,
	    "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
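	/*
	 * Worked example for the fallback encoding above (illustrative
	 * values only): serial0/1/2 are packed into a single number and
	 * printed as a letter plus five digits, so sn == 123456 becomes
	 * 'A' + 1 = 'B' followed by 23456, i.e. "B23456".
	 */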
	/* Model name. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
	alen = scnprintf(
	    eiter->a.model, sizeof(eiter->a.model),
	    "%s", ha->model_number);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a3,
	    "MODEL NAME = %s.\n", eiter->a.model);
	/* Model description. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
	alen = scnprintf(
	    eiter->a.model_desc, sizeof(eiter->a.model_desc),
	    "%s", ha->model_desc);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a4,
	    "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
	/* Hardware version. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
	alen = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (!alen) {
			alen = qla2xxx_get_vpd_field(vha, "MN",
			    eiter->a.hw_version, sizeof(eiter->a.hw_version));
		}
		if (!alen) {
			alen = qla2xxx_get_vpd_field(vha, "EC",
			    eiter->a.hw_version, sizeof(eiter->a.hw_version));
		}
	}
	if (!alen) {
		alen = scnprintf(
		    eiter->a.hw_version, sizeof(eiter->a.hw_version),
		    "HW:%s", ha->adapter_id);
	}
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a5,
	    "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
	/* Driver version. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
	alen = scnprintf(
	    eiter->a.driver_version, sizeof(eiter->a.driver_version),
	    "%s", qla2x00_version_str);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a6,
	    "DRIVER VERSION = %s.\n", eiter->a.driver_version);
	/* Option ROM version. */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
	alen = scnprintf(
	    eiter->a.orom_version, sizeof(eiter->a.orom_version),
	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;

	ql_dbg(ql_dbg_disc, vha, 0x20a7,
	    "OPTROM VERSION = %d.%02d.\n",
	    eiter->a.orom_version[1], eiter->a.orom_version[0]);
	/* Firmware version */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
	    sizeof(eiter->a.fw_version));
	alen = strlen(eiter->a.fw_version);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a8,
	    "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
	if (callopt == CALLOPT_FDMI1)
		goto done;
	/* OS Name and Version */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
	alen = 0;
	if (p_sysid) {
		alen = scnprintf(
		    eiter->a.os_version, sizeof(eiter->a.os_version),
		    "%s %s %s",
		    p_sysid->sysname, p_sysid->release, p_sysid->machine);
	}
	if (!alen) {
		alen = scnprintf(
		    eiter->a.os_version, sizeof(eiter->a.os_version),
		    "%s %s",
		    "Linux", fc_host_system_hostname(vha->host));
	}
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20a9,
	    "OS VERSION = %s.\n", eiter->a.os_version);
	/* MAX CT Payload Length */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
	eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
	    icb24->frame_payload_size : ha->init_cb->frame_payload_size));
	alen = sizeof(eiter->a.max_ct_len);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20aa,
	    "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
	/* Node Symbolic Name */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
	alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
	    sizeof(eiter->a.sym_name));
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20ab,
	    "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
	/* Vendor Specific information */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
	eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
	alen = sizeof(eiter->a.vendor_specific_info);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20ac,
	    "VENDOR SPECIFIC INFO = 0x%x.\n",
	    be32_to_cpu(eiter->a.vendor_specific_info));
	/* Num Ports */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
	eiter->a.num_ports = cpu_to_be32(1);
	alen = sizeof(eiter->a.num_ports);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20ad,
	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
	/* Fabric Name */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
	    sizeof(eiter->a.fabric_name));
	alen = sizeof(eiter->a.fabric_name);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20ae,
	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
	/* BIOS Version */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
	alen = scnprintf(
	    eiter->a.bios_name, sizeof(eiter->a.bios_name),
	    "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20af,
	    "BIOS NAME = %s\n", eiter->a.bios_name);
	/* Vendor Identifier */
	eiter = entries + size;
	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
	alen = scnprintf(
	    eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
	    "%s", "QLGC");
	alen += FDMI_ATTR_ALIGNMENT(alen);
	alen += FDMI_ATTR_TYPELEN(eiter);
	eiter->len = cpu_to_be16(alen);
	size += alen;
	ql_dbg(ql_dbg_disc, vha, 0x20b0,
	    "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
done:
	return size;
}
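/*
 * Only the attributes up to the firmware version (and, for ports, up to
 * the host name) are built when callopt is CALLOPT_FDMI1; the early
 * "goto done" above and below is what trims the list to the baseline
 * FDMI-1 registration. The remaining attributes are assumed to be used
 * only by the extended call options handled elsewhere in the driver.
 */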
1847 */ 1848 static unsigned long 1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, 1850 unsigned int callopt) 1851 { 1852 struct qla_hw_data *ha = vha->hw; 1853 struct init_cb_24xx *icb24 = (void *)ha->init_cb; 1854 struct new_utsname *p_sysid = utsname(); 1855 char *hostname = p_sysid ? 1856 p_sysid->nodename : fc_host_system_hostname(vha->host); 1857 struct ct_fdmi_port_attr *eiter; 1858 uint16_t alen; 1859 unsigned long size = 0; 1860 1861 /* FC4 types. */ 1862 eiter = entries + size; 1863 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); 1864 eiter->a.fc4_types[0] = 0x00; 1865 eiter->a.fc4_types[1] = 0x00; 1866 eiter->a.fc4_types[2] = 0x01; 1867 eiter->a.fc4_types[3] = 0x00; 1868 alen = sizeof(eiter->a.fc4_types); 1869 alen += FDMI_ATTR_TYPELEN(eiter); 1870 eiter->len = cpu_to_be16(alen); 1871 size += alen; 1872 ql_dbg(ql_dbg_disc, vha, 0x20c0, 1873 "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types); 1874 if (vha->flags.nvme_enabled) { 1875 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ 1876 ql_dbg(ql_dbg_disc, vha, 0x211f, 1877 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", 1878 eiter->a.fc4_types[6]); 1879 } 1880 /* Supported speed. */ 1881 eiter = entries + size; 1882 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1883 eiter->a.sup_speed = cpu_to_be32( 1884 qla25xx_fdmi_port_speed_capability(ha)); 1885 alen = sizeof(eiter->a.sup_speed); 1886 alen += FDMI_ATTR_TYPELEN(eiter); 1887 eiter->len = cpu_to_be16(alen); 1888 size += alen; 1889 ql_dbg(ql_dbg_disc, vha, 0x20c1, 1890 "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed)); 1891 /* Current speed. */ 1892 eiter = entries + size; 1893 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); 1894 eiter->a.cur_speed = cpu_to_be32( 1895 qla25xx_fdmi_port_speed_currently(ha)); 1896 alen = sizeof(eiter->a.cur_speed); 1897 alen += FDMI_ATTR_TYPELEN(eiter); 1898 eiter->len = cpu_to_be16(alen); 1899 size += alen; 1900 ql_dbg(ql_dbg_disc, vha, 0x20c2, 1901 "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed)); 1902 /* Max frame size. */ 1903 eiter = entries + size; 1904 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1905 eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? 1906 icb24->frame_payload_size : ha->init_cb->frame_payload_size)); 1907 alen = sizeof(eiter->a.max_frame_size); 1908 alen += FDMI_ATTR_TYPELEN(eiter); 1909 eiter->len = cpu_to_be16(alen); 1910 size += alen; 1911 ql_dbg(ql_dbg_disc, vha, 0x20c3, 1912 "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size)); 1913 /* OS device name. */ 1914 eiter = entries + size; 1915 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); 1916 alen = scnprintf( 1917 eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), 1918 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 1919 alen += FDMI_ATTR_ALIGNMENT(alen); 1920 alen += FDMI_ATTR_TYPELEN(eiter); 1921 eiter->len = cpu_to_be16(alen); 1922 size += alen; 1923 ql_dbg(ql_dbg_disc, vha, 0x20c4, 1924 "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name); 1925 /* Hostname. 
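The utsname nodename is preferred; the SCSI host's system hostname is used as a fallback, and "Linux-default" when neither is set.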
*/ 1926 eiter = entries + size; 1927 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); 1928 if (!*hostname || !strncmp(hostname, "(none)", 6)) 1929 hostname = "Linux-default"; 1930 alen = scnprintf( 1931 eiter->a.host_name, sizeof(eiter->a.host_name), 1932 "%s", hostname); 1933 alen += FDMI_ATTR_ALIGNMENT(alen); 1934 alen += FDMI_ATTR_TYPELEN(eiter); 1935 eiter->len = cpu_to_be16(alen); 1936 size += alen; 1937 ql_dbg(ql_dbg_disc, vha, 0x20c5, 1938 "HOSTNAME = %s.\n", eiter->a.host_name); 1939 1940 if (callopt == CALLOPT_FDMI1) 1941 goto done; 1942 1943 /* Node Name */ 1944 eiter = entries + size; 1945 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); 1946 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); 1947 alen = sizeof(eiter->a.node_name); 1948 alen += FDMI_ATTR_TYPELEN(eiter); 1949 eiter->len = cpu_to_be16(alen); 1950 size += alen; 1951 ql_dbg(ql_dbg_disc, vha, 0x20c6, 1952 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); 1953 1954 /* Port Name */ 1955 eiter = entries + size; 1956 eiter->type = cpu_to_be16(FDMI_PORT_NAME); 1957 memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name)); 1958 alen = sizeof(eiter->a.port_name); 1959 alen += FDMI_ATTR_TYPELEN(eiter); 1960 eiter->len = cpu_to_be16(alen); 1961 size += alen; 1962 ql_dbg(ql_dbg_disc, vha, 0x20c7, 1963 "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); 1964 1965 /* Port Symbolic Name */ 1966 eiter = entries + size; 1967 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); 1968 alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, 1969 sizeof(eiter->a.port_sym_name)); 1970 alen += FDMI_ATTR_ALIGNMENT(alen); 1971 alen += FDMI_ATTR_TYPELEN(eiter); 1972 eiter->len = cpu_to_be16(alen); 1973 size += alen; 1974 ql_dbg(ql_dbg_disc, vha, 0x20c8, 1975 "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); 1976 1977 /* Port Type */ 1978 eiter = entries + size; 1979 eiter->type = cpu_to_be16(FDMI_PORT_TYPE); 1980 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); 1981 alen = sizeof(eiter->a.port_type); 1982 alen += FDMI_ATTR_TYPELEN(eiter); 1983 eiter->len = cpu_to_be16(alen); 1984 size += alen; 1985 ql_dbg(ql_dbg_disc, vha, 0x20c9, 1986 "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); 1987 1988 /* Supported Class of Service */ 1989 eiter = entries + size; 1990 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); 1991 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); 1992 alen = sizeof(eiter->a.port_supported_cos); 1993 alen += FDMI_ATTR_TYPELEN(eiter); 1994 eiter->len = cpu_to_be16(alen); 1995 size += alen; 1996 ql_dbg(ql_dbg_disc, vha, 0x20ca, 1997 "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); 1998 1999 /* Port Fabric Name */ 2000 eiter = entries + size; 2001 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); 2002 memcpy(eiter->a.fabric_name, vha->fabric_node_name, 2003 sizeof(eiter->a.fabric_name)); 2004 alen = sizeof(eiter->a.fabric_name); 2005 alen += FDMI_ATTR_TYPELEN(eiter); 2006 eiter->len = cpu_to_be16(alen); 2007 size += alen; 2008 ql_dbg(ql_dbg_disc, vha, 0x20cb, 2009 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); 2010 2011 /* FC4_type */ 2012 eiter = entries + size; 2013 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); 2014 eiter->a.port_fc4_type[0] = 0x00; 2015 eiter->a.port_fc4_type[1] = 0x00; 2016 eiter->a.port_fc4_type[2] = 0x01; 2017 eiter->a.port_fc4_type[3] = 0x00; 2018 alen = sizeof(eiter->a.port_fc4_type); 2019 alen += FDMI_ATTR_TYPELEN(eiter); 2020 eiter->len = cpu_to_be16(alen); 2021 size += alen; 2022 ql_dbg(ql_dbg_disc, vha, 
0x20cc, 2023 "PORT ACTIVE FC4 TYPE = %016llx.\n", 2024 *(uint64_t *)eiter->a.port_fc4_type); 2025 2026 /* Port State */ 2027 eiter = entries + size; 2028 eiter->type = cpu_to_be16(FDMI_PORT_STATE); 2029 eiter->a.port_state = cpu_to_be32(2); 2030 alen = sizeof(eiter->a.port_state); 2031 alen += FDMI_ATTR_TYPELEN(eiter); 2032 eiter->len = cpu_to_be16(alen); 2033 size += alen; 2034 ql_dbg(ql_dbg_disc, vha, 0x20cd, 2035 "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); 2036 2037 /* Number of Ports */ 2038 eiter = entries + size; 2039 eiter->type = cpu_to_be16(FDMI_PORT_COUNT); 2040 eiter->a.num_ports = cpu_to_be32(1); 2041 alen = sizeof(eiter->a.num_ports); 2042 alen += FDMI_ATTR_TYPELEN(eiter); 2043 eiter->len = cpu_to_be16(alen); 2044 size += alen; 2045 ql_dbg(ql_dbg_disc, vha, 0x20ce, 2046 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); 2047 2048 /* Port Identifier */ 2049 eiter = entries + size; 2050 eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); 2051 eiter->a.port_id = cpu_to_be32(vha->d_id.b24); 2052 alen = sizeof(eiter->a.port_id); 2053 alen += FDMI_ATTR_TYPELEN(eiter); 2054 eiter->len = cpu_to_be16(alen); 2055 size += alen; 2056 ql_dbg(ql_dbg_disc, vha, 0x20cf, 2057 "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); 2058 2059 if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) 2060 goto done; 2061 2062 /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ 2063 eiter = entries + size; 2064 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); 2065 alen = scnprintf( 2066 eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service), 2067 "%s", "Smart SAN Initiator"); 2068 alen += FDMI_ATTR_ALIGNMENT(alen); 2069 alen += FDMI_ATTR_TYPELEN(eiter); 2070 eiter->len = cpu_to_be16(alen); 2071 size += alen; 2072 ql_dbg(ql_dbg_disc, vha, 0x20d0, 2073 "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); 2074 2075 /* Smart SAN GUID (NWWN+PWWN) */ 2076 eiter = entries + size; 2077 eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); 2078 memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE); 2079 memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE); 2080 alen = sizeof(eiter->a.smartsan_guid); 2081 alen += FDMI_ATTR_TYPELEN(eiter); 2082 eiter->len = cpu_to_be16(alen); 2083 size += alen; 2084 ql_dbg(ql_dbg_disc, vha, 0x20d1, 2085 "Smart SAN GUID = %016llx-%016llx\n", 2086 wwn_to_u64(eiter->a.smartsan_guid), 2087 wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); 2088 2089 /* Smart SAN Version (populate "Smart SAN Version 1.0") */ 2090 eiter = entries + size; 2091 eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); 2092 alen = scnprintf( 2093 eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version), 2094 "%s", "Smart SAN Version 2.0"); 2095 alen += FDMI_ATTR_ALIGNMENT(alen); 2096 alen += FDMI_ATTR_TYPELEN(eiter); 2097 eiter->len = cpu_to_be16(alen); 2098 size += alen; 2099 ql_dbg(ql_dbg_disc, vha, 0x20d2, 2100 "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); 2101 2102 /* Smart SAN Product Name (Specify Adapter Model No) */ 2103 eiter = entries + size; 2104 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); 2105 alen = scnprintf(eiter->a.smartsan_prod_name, 2106 sizeof(eiter->a.smartsan_prod_name), 2107 "ISP%04x", ha->pdev->device); 2108 alen += FDMI_ATTR_ALIGNMENT(alen); 2109 alen += FDMI_ATTR_TYPELEN(eiter); 2110 eiter->len = cpu_to_be16(alen); 2111 size += alen; 2112 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2113 "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); 2114 2115 /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ 2116 eiter = 
entries + size; 2117 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); 2118 eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1); 2119 alen = sizeof(eiter->a.smartsan_port_info); 2120 alen += FDMI_ATTR_TYPELEN(eiter); 2121 eiter->len = cpu_to_be16(alen); 2122 size += alen; 2123 ql_dbg(ql_dbg_disc, vha, 0x20d4, 2124 "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); 2125 2126 /* Smart SAN Security Support */ 2127 eiter = entries + size; 2128 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); 2129 eiter->a.smartsan_security_support = cpu_to_be32(1); 2130 alen = sizeof(eiter->a.smartsan_security_support); 2131 alen += FDMI_ATTR_TYPELEN(eiter); 2132 eiter->len = cpu_to_be16(alen); 2133 size += alen; 2134 ql_dbg(ql_dbg_disc, vha, 0x20d6, 2135 "SMARTSAN SECURITY SUPPORT = %d\n", 2136 be32_to_cpu(eiter->a.smartsan_security_support)); 2137 2138 done: 2139 return size; 2140 } 2141 2142 /** 2143 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration 2144 * @vha: HA context 2145 * @callopt: Option to issue FDMI registration 2146 * 2147 * Returns 0 on success. 2148 */ 2149 static int 2150 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) 2151 { 2152 struct qla_hw_data *ha = vha->hw; 2153 unsigned long size = 0; 2154 unsigned int rval, count; 2155 ms_iocb_entry_t *ms_pkt; 2156 struct ct_sns_req *ct_req; 2157 struct ct_sns_rsp *ct_rsp; 2158 void *entries; 2159 2160 count = callopt != CALLOPT_FDMI1 ? 2161 FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; 2162 2163 size = RHBA_RSP_SIZE; 2164 2165 ql_dbg(ql_dbg_disc, vha, 0x20e0, 2166 "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2167 2168 /* Request size adjusted after CT preparation */ 2169 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2170 2171 /* Prepare CT request */ 2172 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size); 2173 ct_rsp = &ha->ct_sns->p.rsp; 2174 2175 /* Prepare FDMI command entries */ 2176 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, 2177 sizeof(ct_req->req.rhba.hba_identifier)); 2178 size += sizeof(ct_req->req.rhba.hba_identifier); 2179 2180 ct_req->req.rhba.entry_count = cpu_to_be32(1); 2181 size += sizeof(ct_req->req.rhba.entry_count); 2182 2183 memcpy(ct_req->req.rhba.port_name, vha->port_name, 2184 sizeof(ct_req->req.rhba.port_name)); 2185 size += sizeof(ct_req->req.rhba.port_name); 2186 2187 /* Attribute count */ 2188 ct_req->req.rhba.attrs.count = cpu_to_be32(count); 2189 size += sizeof(ct_req->req.rhba.attrs.count); 2190 2191 /* Attribute block */ 2192 entries = &ct_req->req.rhba.attrs.entry; 2193 2194 size += qla2x00_hba_attributes(vha, entries, callopt); 2195 2196 /* Update MS request size. 
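The extra 16 bytes passed below account for the CT_IU preamble that precedes the RHBA payload accumulated in size.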
*/ 2197 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2198 2199 ql_dbg(ql_dbg_disc, vha, 0x20e1, 2200 "RHBA %016llx %016llx.\n", 2201 wwn_to_u64(ct_req->req.rhba.hba_identifier), 2202 wwn_to_u64(ct_req->req.rhba.port_name)); 2203 2204 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2, 2205 entries, size); 2206 2207 /* Execute MS IOCB */ 2208 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2209 sizeof(*ha->ms_iocb)); 2210 if (rval) { 2211 ql_dbg(ql_dbg_disc, vha, 0x20e3, 2212 "RHBA iocb failed (%d).\n", rval); 2213 return rval; 2214 } 2215 2216 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); 2217 if (rval) { 2218 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2219 ct_rsp->header.explanation_code == 2220 CT_EXPL_ALREADY_REGISTERED) { 2221 ql_dbg(ql_dbg_disc, vha, 0x20e4, 2222 "RHBA already registered.\n"); 2223 return QLA_ALREADY_REGISTERED; 2224 } 2225 2226 ql_dbg(ql_dbg_disc, vha, 0x20e5, 2227 "RHBA failed, CT Reason %#x, CT Explanation %#x\n", 2228 ct_rsp->header.reason_code, 2229 ct_rsp->header.explanation_code); 2230 return rval; 2231 } 2232 2233 ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n"); 2234 return rval; 2235 } 2236 2237 2238 static int 2239 qla2x00_fdmi_dhba(scsi_qla_host_t *vha) 2240 { 2241 int rval; 2242 struct qla_hw_data *ha = vha->hw; 2243 ms_iocb_entry_t *ms_pkt; 2244 struct ct_sns_req *ct_req; 2245 struct ct_sns_rsp *ct_rsp; 2246 /* Issue RPA */ 2247 /* Prepare common MS IOCB */ 2248 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE, 2249 DHBA_RSP_SIZE); 2250 /* Prepare CT request */ 2251 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); 2252 ct_rsp = &ha->ct_sns->p.rsp; 2253 /* Prepare FDMI command arguments -- portname. */ 2254 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 2255 ql_dbg(ql_dbg_disc, vha, 0x2036, 2256 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); 2257 /* Execute MS IOCB */ 2258 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2259 sizeof(ms_iocb_entry_t)); 2260 if (rval != QLA_SUCCESS) { 2261 /*EMPTY*/ 2262 ql_dbg(ql_dbg_disc, vha, 0x2037, 2263 "DHBA issue IOCB failed (%d).\n", rval); 2264 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 2265 QLA_SUCCESS) { 2266 rval = QLA_FUNCTION_FAILED; 2267 } else { 2268 ql_dbg(ql_dbg_disc, vha, 0x2038, 2269 "DHBA exiting normally.\n"); 2270 } 2271 return rval; 2272 } 2273 2274 /** 2275 * qla2x00_fdmi_rprt() perform RPRT registration 2276 * @vha: HA context 2277 * @callopt: Option to issue extended or standard FDMI 2278 * command parameter 2279 * 2280 * Returns 0 on success. 2281 */ 2282 static int 2283 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) 2284 { 2285 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 2286 struct qla_hw_data *ha = vha->hw; 2287 ulong size = 0; 2288 uint rval, count; 2289 ms_iocb_entry_t *ms_pkt; 2290 struct ct_sns_req *ct_req; 2291 struct ct_sns_rsp *ct_rsp; 2292 void *entries; 2293 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? 2294 FDMI2_SMARTSAN_PORT_ATTR_COUNT : 2295 callopt != CALLOPT_FDMI1 ? 
2296 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; 2297 2298 size = RPRT_RSP_SIZE; 2299 ql_dbg(ql_dbg_disc, vha, 0x20e8, 2300 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2301 /* Request size adjusted after CT preparation */ 2302 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2303 /* Prepare CT request */ 2304 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size); 2305 ct_rsp = &ha->ct_sns->p.rsp; 2306 /* Prepare FDMI command entries */ 2307 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name, 2308 sizeof(ct_req->req.rprt.hba_identifier)); 2309 size += sizeof(ct_req->req.rprt.hba_identifier); 2310 memcpy(ct_req->req.rprt.port_name, vha->port_name, 2311 sizeof(ct_req->req.rprt.port_name)); 2312 size += sizeof(ct_req->req.rprt.port_name); 2313 /* Attribute count */ 2314 ct_req->req.rprt.attrs.count = cpu_to_be32(count); 2315 size += sizeof(ct_req->req.rprt.attrs.count); 2316 /* Attribute block */ 2317 entries = ct_req->req.rprt.attrs.entry; 2318 size += qla2x00_port_attributes(vha, entries, callopt); 2319 /* Update MS request size. */ 2320 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2321 ql_dbg(ql_dbg_disc, vha, 0x20e9, 2322 "RPRT %016llx %016llx.\n", 2323 wwn_to_u64(ct_req->req.rprt.port_name), 2324 wwn_to_u64(ct_req->req.rprt.port_name)); 2325 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea, 2326 entries, size); 2327 /* Execute MS IOCB */ 2328 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2329 sizeof(*ha->ms_iocb)); 2330 if (rval) { 2331 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2332 "RPRT iocb failed (%d).\n", rval); 2333 return rval; 2334 } 2335 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT"); 2336 if (rval) { 2337 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2338 ct_rsp->header.explanation_code == 2339 CT_EXPL_ALREADY_REGISTERED) { 2340 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2341 "RPRT already registered.\n"); 2342 return QLA_ALREADY_REGISTERED; 2343 } 2344 2345 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2346 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n", 2347 ct_rsp->header.reason_code, 2348 ct_rsp->header.explanation_code); 2349 return rval; 2350 } 2351 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n"); 2352 return rval; 2353 } 2354 2355 /** 2356 * qla2x00_fdmi_rpa() - perform RPA registration 2357 * @vha: HA context 2358 * @callopt: Option to issue FDMI registration 2359 * 2360 * Returns 0 on success. 2361 */ 2362 static int 2363 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt) 2364 { 2365 struct qla_hw_data *ha = vha->hw; 2366 ulong size = 0; 2367 uint rval, count; 2368 ms_iocb_entry_t *ms_pkt; 2369 struct ct_sns_req *ct_req; 2370 struct ct_sns_rsp *ct_rsp; 2371 void *entries; 2372 2373 count = 2374 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? 2375 FDMI2_SMARTSAN_PORT_ATTR_COUNT : 2376 callopt != CALLOPT_FDMI1 ? 2377 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; 2378 2379 size = 2380 callopt != CALLOPT_FDMI1 ? 2381 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE; 2382 2383 ql_dbg(ql_dbg_disc, vha, 0x20f0, 2384 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2385 2386 /* Request size adjusted after CT preparation */ 2387 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2388 2389 /* Prepare CT request */ 2390 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size); 2391 ct_rsp = &ha->ct_sns->p.rsp; 2392 2393 /* Prepare FDMI command entries. 
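The RPA payload is the registering port name, the attribute count, and the attribute block built by qla2x00_port_attributes(); size accumulates each piece before the MS IOCB is updated.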
*/ 2394 memcpy(ct_req->req.rpa.port_name, vha->port_name, 2395 sizeof(ct_req->req.rpa.port_name)); 2396 size += sizeof(ct_req->req.rpa.port_name); 2397 2398 /* Attribute count */ 2399 ct_req->req.rpa.attrs.count = cpu_to_be32(count); 2400 size += sizeof(ct_req->req.rpa.attrs.count); 2401 2402 /* Attribute block */ 2403 entries = ct_req->req.rpa.attrs.entry; 2404 2405 size += qla2x00_port_attributes(vha, entries, callopt); 2406 2407 /* Update MS request size. */ 2408 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2409 2410 ql_dbg(ql_dbg_disc, vha, 0x20f1, 2411 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name)); 2412 2413 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2, 2414 entries, size); 2415 2416 /* Execute MS IOCB */ 2417 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2418 sizeof(*ha->ms_iocb)); 2419 if (rval) { 2420 ql_dbg(ql_dbg_disc, vha, 0x20f3, 2421 "RPA iocb failed (%d).\n", rval); 2422 return rval; 2423 } 2424 2425 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); 2426 if (rval) { 2427 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2428 ct_rsp->header.explanation_code == 2429 CT_EXPL_ALREADY_REGISTERED) { 2430 ql_dbg(ql_dbg_disc, vha, 0x20f4, 2431 "RPA already registered.\n"); 2432 return QLA_ALREADY_REGISTERED; 2433 } 2434 2435 ql_dbg(ql_dbg_disc, vha, 0x20f5, 2436 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n", 2437 ct_rsp->header.reason_code, 2438 ct_rsp->header.explanation_code); 2439 return rval; 2440 } 2441 2442 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n"); 2443 return rval; 2444 } 2445 2446 /** 2447 * qla2x00_fdmi_register() - 2448 * @vha: HA context 2449 * 2450 * Returns 0 on success. 2451 */ 2452 int 2453 qla2x00_fdmi_register(scsi_qla_host_t *vha) 2454 { 2455 int rval = QLA_SUCCESS; 2456 struct qla_hw_data *ha = vha->hw; 2457 2458 if (IS_QLA2100(ha) || IS_QLA2200(ha) || 2459 IS_QLAFX00(ha)) 2460 return rval; 2461 2462 rval = qla2x00_mgmt_svr_login(vha); 2463 if (rval) 2464 return rval; 2465 2466 /* For npiv/vport send rprt only */ 2467 if (vha->vp_idx) { 2468 if (ql2xsmartsan) 2469 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN); 2470 if (rval || !ql2xsmartsan) 2471 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2); 2472 if (rval) 2473 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1); 2474 2475 return rval; 2476 } 2477 2478 /* Try fdmi2 first, if fails then try fdmi1 */ 2479 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2480 if (rval) { 2481 if (rval != QLA_ALREADY_REGISTERED) 2482 goto try_fdmi; 2483 2484 rval = qla2x00_fdmi_dhba(vha); 2485 if (rval) 2486 goto try_fdmi; 2487 2488 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2489 if (rval) 2490 goto try_fdmi; 2491 } 2492 2493 if (ql2xsmartsan) 2494 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN); 2495 if (rval || !ql2xsmartsan) 2496 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2); 2497 if (rval) 2498 goto try_fdmi; 2499 2500 return rval; 2501 2502 try_fdmi: 2503 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2504 if (rval) { 2505 if (rval != QLA_ALREADY_REGISTERED) 2506 return rval; 2507 2508 rval = qla2x00_fdmi_dhba(vha); 2509 if (rval) 2510 return rval; 2511 2512 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2513 if (rval) 2514 return rval; 2515 } 2516 2517 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1); 2518 2519 return rval; 2520 } 2521 2522 /** 2523 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query. 2524 * @vha: HA context 2525 * @list: switch info entries to populate 2526 * 2527 * Returns 0 on success. 
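 *
 * The fabric-assigned port name returned for each N_Port ID in @list is
 * saved in list[].fabric_port_name and is used later for GPSC queries.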
2528 */ 2529 int 2530 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) 2531 { 2532 int rval = QLA_SUCCESS; 2533 uint16_t i; 2534 struct qla_hw_data *ha = vha->hw; 2535 ms_iocb_entry_t *ms_pkt; 2536 struct ct_sns_req *ct_req; 2537 struct ct_sns_rsp *ct_rsp; 2538 struct ct_arg arg; 2539 2540 if (!IS_IIDMA_CAPABLE(ha)) 2541 return QLA_FUNCTION_FAILED; 2542 2543 arg.iocb = ha->ms_iocb; 2544 arg.req_dma = ha->ct_sns_dma; 2545 arg.rsp_dma = ha->ct_sns_dma; 2546 arg.req_size = GFPN_ID_REQ_SIZE; 2547 arg.rsp_size = GFPN_ID_RSP_SIZE; 2548 arg.nport_handle = NPH_SNS; 2549 2550 for (i = 0; i < ha->max_fibre_devices; i++) { 2551 /* Issue GFPN_ID */ 2552 /* Prepare common MS IOCB */ 2553 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2554 2555 /* Prepare CT request */ 2556 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, 2557 GFPN_ID_RSP_SIZE); 2558 ct_rsp = &ha->ct_sns->p.rsp; 2559 2560 /* Prepare CT arguments -- port_id */ 2561 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2562 2563 /* Execute MS IOCB */ 2564 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2565 sizeof(ms_iocb_entry_t)); 2566 if (rval != QLA_SUCCESS) { 2567 /*EMPTY*/ 2568 ql_dbg(ql_dbg_disc, vha, 0x2023, 2569 "GFPN_ID issue IOCB failed (%d).\n", rval); 2570 break; 2571 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2572 "GFPN_ID") != QLA_SUCCESS) { 2573 rval = QLA_FUNCTION_FAILED; 2574 break; 2575 } else { 2576 /* Save fabric portname */ 2577 memcpy(list[i].fabric_port_name, 2578 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE); 2579 } 2580 2581 /* Last device exit. */ 2582 if (list[i].d_id.b.rsvd_1 != 0) 2583 break; 2584 } 2585 2586 return (rval); 2587 } 2588 2589 2590 static inline struct ct_sns_req * 2591 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, 2592 uint16_t rsp_size) 2593 { 2594 memset(p, 0, sizeof(struct ct_sns_pkt)); 2595 2596 p->p.req.header.revision = 0x01; 2597 p->p.req.header.gs_type = 0xFA; 2598 p->p.req.header.gs_subtype = 0x01; 2599 p->p.req.command = cpu_to_be16(cmd); 2600 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 2601 2602 return &p->p.req; 2603 } 2604 2605 static uint16_t 2606 qla2x00_port_speed_capability(uint16_t speed) 2607 { 2608 switch (speed) { 2609 case BIT_15: 2610 return PORT_SPEED_1GB; 2611 case BIT_14: 2612 return PORT_SPEED_2GB; 2613 case BIT_13: 2614 return PORT_SPEED_4GB; 2615 case BIT_12: 2616 return PORT_SPEED_10GB; 2617 case BIT_11: 2618 return PORT_SPEED_8GB; 2619 case BIT_10: 2620 return PORT_SPEED_16GB; 2621 case BIT_8: 2622 return PORT_SPEED_32GB; 2623 case BIT_7: 2624 return PORT_SPEED_64GB; 2625 default: 2626 return PORT_SPEED_UNKNOWN; 2627 } 2628 } 2629 2630 /** 2631 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. 2632 * @vha: HA context 2633 * @list: switch info entries to populate 2634 * 2635 * Returns 0 on success. 
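 *
 * GPSC is an FC-GS Fabric Management query addressed to the fabric
 * management server (hence the qla2x00_mgmt_svr_login() call below)
 * rather than to the directory/name server.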
2636 */
2637 int
2638 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2639 {
2640 int rval;
2641 uint16_t i;
2642 struct qla_hw_data *ha = vha->hw;
2643 ms_iocb_entry_t *ms_pkt;
2644 struct ct_sns_req *ct_req;
2645 struct ct_sns_rsp *ct_rsp;
2646 struct ct_arg arg;
2647
2648 if (!IS_IIDMA_CAPABLE(ha))
2649 return QLA_FUNCTION_FAILED;
2650 if (!ha->flags.gpsc_supported)
2651 return QLA_FUNCTION_FAILED;
2652
2653 rval = qla2x00_mgmt_svr_login(vha);
2654 if (rval)
2655 return rval;
2656
2657 arg.iocb = ha->ms_iocb;
2658 arg.req_dma = ha->ct_sns_dma;
2659 arg.rsp_dma = ha->ct_sns_dma;
2660 arg.req_size = GPSC_REQ_SIZE;
2661 arg.rsp_size = GPSC_RSP_SIZE;
2662 arg.nport_handle = vha->mgmt_svr_loop_id;
2663
2664 for (i = 0; i < ha->max_fibre_devices; i++) {
2665 /* Issue GPSC */
2666 /* Prepare common MS IOCB */
2667 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2668
2669 /* Prepare CT request */
2670 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2671 GPSC_RSP_SIZE);
2672 ct_rsp = &ha->ct_sns->p.rsp;
2673
2674 /* Prepare CT arguments -- port_name */
2675 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2676 WWN_SIZE);
2677
2678 /* Execute MS IOCB */
2679 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2680 sizeof(ms_iocb_entry_t));
2681 if (rval != QLA_SUCCESS) {
2682 /*EMPTY*/
2683 ql_dbg(ql_dbg_disc, vha, 0x2059,
2684 "GPSC issue IOCB failed (%d).\n", rval);
2685 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2686 "GPSC")) != QLA_SUCCESS) {
2687 /* FM command unsupported? */
2688 if (rval == QLA_INVALID_COMMAND &&
2689 (ct_rsp->header.reason_code ==
2690 CT_REASON_INVALID_COMMAND_CODE ||
2691 ct_rsp->header.reason_code ==
2692 CT_REASON_COMMAND_UNSUPPORTED)) {
2693 ql_dbg(ql_dbg_disc, vha, 0x205a,
2694 "GPSC command unsupported, disabling "
2695 "query.\n");
2696 ha->flags.gpsc_supported = 0;
2697 rval = QLA_FUNCTION_FAILED;
2698 break;
2699 }
2700 rval = QLA_FUNCTION_FAILED;
2701 } else {
2702 list[i].fp_speed = qla2x00_port_speed_capability(
2703 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2704 ql_dbg(ql_dbg_disc, vha, 0x205b,
2705 "GPSC ext entry - fpn "
2706 "%8phN speeds=%04x speed=%04x.\n",
2707 list[i].fabric_port_name,
2708 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2709 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2710 }
2711
2712 /* Last device exit. */
2713 if (list[i].d_id.b.rsvd_1 != 0)
2714 break;
2715 }
2716
2717 return (rval);
2718 }
2719
2720 /**
2721 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2722 * 2723 * @vha: HA context 2724 * @list: switch info entries to populate 2725 * 2726 */ 2727 void 2728 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) 2729 { 2730 int rval; 2731 uint16_t i; 2732 2733 ms_iocb_entry_t *ms_pkt; 2734 struct ct_sns_req *ct_req; 2735 struct ct_sns_rsp *ct_rsp; 2736 struct qla_hw_data *ha = vha->hw; 2737 uint8_t fcp_scsi_features = 0, nvme_features = 0; 2738 struct ct_arg arg; 2739 2740 for (i = 0; i < ha->max_fibre_devices; i++) { 2741 /* Set default FC4 Type as UNKNOWN so the default is to 2742 * Process this port */ 2743 list[i].fc4_type = 0; 2744 2745 /* Do not attempt GFF_ID if we are not FWI_2 capable */ 2746 if (!IS_FWI2_CAPABLE(ha)) 2747 continue; 2748 2749 arg.iocb = ha->ms_iocb; 2750 arg.req_dma = ha->ct_sns_dma; 2751 arg.rsp_dma = ha->ct_sns_dma; 2752 arg.req_size = GFF_ID_REQ_SIZE; 2753 arg.rsp_size = GFF_ID_RSP_SIZE; 2754 arg.nport_handle = NPH_SNS; 2755 2756 /* Prepare common MS IOCB */ 2757 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2758 2759 /* Prepare CT request */ 2760 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, 2761 GFF_ID_RSP_SIZE); 2762 ct_rsp = &ha->ct_sns->p.rsp; 2763 2764 /* Prepare CT arguments -- port_id */ 2765 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2766 2767 /* Execute MS IOCB */ 2768 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2769 sizeof(ms_iocb_entry_t)); 2770 2771 if (rval != QLA_SUCCESS) { 2772 ql_dbg(ql_dbg_disc, vha, 0x205c, 2773 "GFF_ID issue IOCB failed (%d).\n", rval); 2774 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2775 "GFF_ID") != QLA_SUCCESS) { 2776 ql_dbg(ql_dbg_disc, vha, 0x205d, 2777 "GFF_ID IOCB status had a failure status code.\n"); 2778 } else { 2779 fcp_scsi_features = 2780 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 2781 fcp_scsi_features &= 0x0f; 2782 2783 if (fcp_scsi_features) { 2784 list[i].fc4_type = FS_FC4TYPE_FCP; 2785 list[i].fc4_features = fcp_scsi_features; 2786 } 2787 2788 nvme_features = 2789 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 2790 nvme_features &= 0xf; 2791 2792 if (nvme_features) { 2793 list[i].fc4_type |= FS_FC4TYPE_NVME; 2794 list[i].fc4_features = nvme_features; 2795 } 2796 } 2797 2798 /* Last device exit. */ 2799 if (list[i].d_id.b.rsvd_1 != 0) 2800 break; 2801 } 2802 } 2803 2804 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) 2805 { 2806 struct qla_work_evt *e; 2807 2808 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); 2809 if (!e) 2810 return QLA_FUNCTION_FAILED; 2811 2812 e->u.fcport.fcport = fcport; 2813 return qla2x00_post_work(vha, e); 2814 } 2815 2816 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) 2817 { 2818 struct fc_port *fcport = ea->fcport; 2819 2820 ql_dbg(ql_dbg_disc, vha, 0x20d8, 2821 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", 2822 __func__, fcport->port_name, fcport->disc_state, 2823 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, 2824 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); 2825 2826 if (fcport->disc_state == DSC_DELETE_PEND) 2827 return; 2828 2829 if (ea->sp->gen2 != fcport->login_gen) { 2830 /* target side must have changed it. 
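The login generation advanced while this GPSC was in flight, so the result is stale and is dropped here.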
*/ 2831 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2832 "%s %8phC generation changed\n", 2833 __func__, fcport->port_name); 2834 return; 2835 } else if (ea->sp->gen1 != fcport->rscn_gen) { 2836 return; 2837 } 2838 2839 qla_post_iidma_work(vha, fcport); 2840 } 2841 2842 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) 2843 { 2844 struct scsi_qla_host *vha = sp->vha; 2845 struct qla_hw_data *ha = vha->hw; 2846 fc_port_t *fcport = sp->fcport; 2847 struct ct_sns_rsp *ct_rsp; 2848 struct event_arg ea; 2849 2850 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; 2851 2852 ql_dbg(ql_dbg_disc, vha, 0x2053, 2853 "Async done-%s res %x, WWPN %8phC \n", 2854 sp->name, res, fcport->port_name); 2855 2856 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 2857 2858 if (res == QLA_FUNCTION_TIMEOUT) 2859 goto done; 2860 2861 if (res == (DID_ERROR << 16)) { 2862 /* entry status error */ 2863 goto done; 2864 } else if (res) { 2865 if ((ct_rsp->header.reason_code == 2866 CT_REASON_INVALID_COMMAND_CODE) || 2867 (ct_rsp->header.reason_code == 2868 CT_REASON_COMMAND_UNSUPPORTED)) { 2869 ql_dbg(ql_dbg_disc, vha, 0x2019, 2870 "GPSC command unsupported, disabling query.\n"); 2871 ha->flags.gpsc_supported = 0; 2872 goto done; 2873 } 2874 } else { 2875 fcport->fp_speed = qla2x00_port_speed_capability( 2876 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2877 2878 ql_dbg(ql_dbg_disc, vha, 0x2054, 2879 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", 2880 sp->name, fcport->fabric_port_name, 2881 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 2882 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2883 } 2884 memset(&ea, 0, sizeof(ea)); 2885 ea.rc = res; 2886 ea.fcport = fcport; 2887 ea.sp = sp; 2888 qla24xx_handle_gpsc_event(vha, &ea); 2889 2890 done: 2891 sp->free(sp); 2892 } 2893 2894 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) 2895 { 2896 int rval = QLA_FUNCTION_FAILED; 2897 struct ct_sns_req *ct_req; 2898 srb_t *sp; 2899 2900 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 2901 return rval; 2902 2903 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2904 if (!sp) 2905 goto done; 2906 2907 sp->type = SRB_CT_PTHRU_CMD; 2908 sp->name = "gpsc"; 2909 sp->gen1 = fcport->rscn_gen; 2910 sp->gen2 = fcport->login_gen; 2911 2912 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 2913 2914 /* CT_IU preamble */ 2915 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, 2916 GPSC_RSP_SIZE); 2917 2918 /* GPSC req */ 2919 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, 2920 WWN_SIZE); 2921 2922 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 2923 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 2924 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 2925 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 2926 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; 2927 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; 2928 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; 2929 2930 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 2931 sp->done = qla24xx_async_gpsc_sp_done; 2932 2933 ql_dbg(ql_dbg_disc, vha, 0x205e, 2934 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", 2935 sp->name, fcport->port_name, sp->handle, 2936 fcport->loop_id, fcport->d_id.b.domain, 2937 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2938 2939 rval = qla2x00_start_sp(sp); 2940 if (rval != QLA_SUCCESS) 2941 goto done_free_sp; 2942 return rval; 2943 2944 done_free_sp: 2945 sp->free(sp); 2946 done: 2947 return rval; 2948 } 2949 2950 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, 
port_id_t *id) 2951 { 2952 struct qla_work_evt *e; 2953 2954 if (test_bit(UNLOADING, &vha->dpc_flags) || 2955 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) 2956 return 0; 2957 2958 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); 2959 if (!e) 2960 return QLA_FUNCTION_FAILED; 2961 2962 e->u.gpnid.id = *id; 2963 return qla2x00_post_work(vha, e); 2964 } 2965 2966 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) 2967 { 2968 struct srb_iocb *c = &sp->u.iocb_cmd; 2969 2970 switch (sp->type) { 2971 case SRB_ELS_DCMD: 2972 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); 2973 break; 2974 case SRB_CT_PTHRU_CMD: 2975 default: 2976 if (sp->u.iocb_cmd.u.ctarg.req) { 2977 dma_free_coherent(&vha->hw->pdev->dev, 2978 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 2979 sp->u.iocb_cmd.u.ctarg.req, 2980 sp->u.iocb_cmd.u.ctarg.req_dma); 2981 sp->u.iocb_cmd.u.ctarg.req = NULL; 2982 } 2983 2984 if (sp->u.iocb_cmd.u.ctarg.rsp) { 2985 dma_free_coherent(&vha->hw->pdev->dev, 2986 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 2987 sp->u.iocb_cmd.u.ctarg.rsp, 2988 sp->u.iocb_cmd.u.ctarg.rsp_dma); 2989 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 2990 } 2991 break; 2992 } 2993 2994 sp->free(sp); 2995 } 2996 2997 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) 2998 { 2999 fc_port_t *fcport, *conflict, *t; 3000 u16 data[2]; 3001 3002 ql_dbg(ql_dbg_disc, vha, 0xffff, 3003 "%s %d port_id: %06x\n", 3004 __func__, __LINE__, ea->id.b24); 3005 3006 if (ea->rc) { 3007 /* cable is disconnected */ 3008 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) { 3009 if (fcport->d_id.b24 == ea->id.b24) 3010 fcport->scan_state = QLA_FCPORT_SCAN; 3011 3012 qlt_schedule_sess_for_deletion(fcport); 3013 } 3014 } else { 3015 /* cable is connected */ 3016 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); 3017 if (fcport) { 3018 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3019 list) { 3020 if ((conflict->d_id.b24 == ea->id.b24) && 3021 (fcport != conflict)) 3022 /* 3023 * 2 fcports with conflict Nport ID or 3024 * an existing fcport is having nport ID 3025 * conflict with new fcport. 3026 */ 3027 3028 conflict->scan_state = QLA_FCPORT_SCAN; 3029 3030 qlt_schedule_sess_for_deletion(conflict); 3031 } 3032 3033 fcport->scan_needed = 0; 3034 fcport->rscn_gen++; 3035 fcport->scan_state = QLA_FCPORT_FOUND; 3036 fcport->flags |= FCF_FABRIC_DEVICE; 3037 if (fcport->login_retry == 0) { 3038 fcport->login_retry = 3039 vha->hw->login_retry_count; 3040 ql_dbg(ql_dbg_disc, vha, 0xffff, 3041 "Port login retry %8phN, lid 0x%04x cnt=%d.\n", 3042 fcport->port_name, fcport->loop_id, 3043 fcport->login_retry); 3044 } 3045 switch (fcport->disc_state) { 3046 case DSC_LOGIN_COMPLETE: 3047 /* recheck session is still intact. 
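An ADISC revalidates the existing login without tearing the session down first.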
*/ 3048 ql_dbg(ql_dbg_disc, vha, 0x210d, 3049 "%s %d %8phC revalidate session with ADISC\n", 3050 __func__, __LINE__, fcport->port_name); 3051 data[0] = data[1] = 0; 3052 qla2x00_post_async_adisc_work(vha, fcport, 3053 data); 3054 break; 3055 case DSC_DELETED: 3056 ql_dbg(ql_dbg_disc, vha, 0x210d, 3057 "%s %d %8phC login\n", __func__, __LINE__, 3058 fcport->port_name); 3059 fcport->d_id = ea->id; 3060 qla24xx_fcport_handle_login(vha, fcport); 3061 break; 3062 case DSC_DELETE_PEND: 3063 fcport->d_id = ea->id; 3064 break; 3065 default: 3066 fcport->d_id = ea->id; 3067 break; 3068 } 3069 } else { 3070 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3071 list) { 3072 if (conflict->d_id.b24 == ea->id.b24) { 3073 /* 2 fcports with conflict Nport ID or 3074 * an existing fcport is having nport ID 3075 * conflict with new fcport. 3076 */ 3077 ql_dbg(ql_dbg_disc, vha, 0xffff, 3078 "%s %d %8phC DS %d\n", 3079 __func__, __LINE__, 3080 conflict->port_name, 3081 conflict->disc_state); 3082 3083 conflict->scan_state = QLA_FCPORT_SCAN; 3084 qlt_schedule_sess_for_deletion(conflict); 3085 } 3086 } 3087 3088 /* create new fcport */ 3089 ql_dbg(ql_dbg_disc, vha, 0x2065, 3090 "%s %d %8phC post new sess\n", 3091 __func__, __LINE__, ea->port_name); 3092 qla24xx_post_newsess_work(vha, &ea->id, 3093 ea->port_name, NULL, NULL, 0); 3094 } 3095 } 3096 } 3097 3098 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) 3099 { 3100 struct scsi_qla_host *vha = sp->vha; 3101 struct ct_sns_req *ct_req = 3102 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3103 struct ct_sns_rsp *ct_rsp = 3104 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3105 struct event_arg ea; 3106 struct qla_work_evt *e; 3107 unsigned long flags; 3108 3109 if (res) 3110 ql_dbg(ql_dbg_disc, vha, 0x2066, 3111 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", 3112 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, 3113 ct_rsp->rsp.gpn_id.port_name); 3114 else 3115 ql_dbg(ql_dbg_disc, vha, 0x2066, 3116 "Async done-%s good rscn gen %d ID %3phC. %8phC\n", 3117 sp->name, sp->gen1, &ct_req->req.port_id.port_id, 3118 ct_rsp->rsp.gpn_id.port_name); 3119 3120 memset(&ea, 0, sizeof(ea)); 3121 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 3122 ea.sp = sp; 3123 ea.id = be_to_port_id(ct_req->req.port_id.port_id); 3124 ea.rc = res; 3125 3126 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3127 list_del(&sp->elem); 3128 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3129 3130 if (res) { 3131 if (res == QLA_FUNCTION_TIMEOUT) { 3132 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3133 sp->free(sp); 3134 return; 3135 } 3136 } else if (sp->gen1) { 3137 /* There was another RSCN for this Nport ID */ 3138 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3139 sp->free(sp); 3140 return; 3141 } 3142 3143 qla24xx_handle_gpnid_event(vha, &ea); 3144 3145 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 3146 if (!e) { 3147 /* please ignore kernel warning. otherwise, we have mem leak. */ 3148 dma_free_coherent(&vha->hw->pdev->dev, 3149 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3150 sp->u.iocb_cmd.u.ctarg.req, 3151 sp->u.iocb_cmd.u.ctarg.req_dma); 3152 sp->u.iocb_cmd.u.ctarg.req = NULL; 3153 3154 dma_free_coherent(&vha->hw->pdev->dev, 3155 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3156 sp->u.iocb_cmd.u.ctarg.rsp, 3157 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3158 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3159 3160 sp->free(sp); 3161 return; 3162 } 3163 3164 e->u.iosb.sp = sp; 3165 qla2x00_post_work(vha, e); 3166 } 3167 3168 /* Get WWPN with Nport ID. 
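A GPN_ID already in flight for the same N_Port ID is reused: the new request only bumps the pending SRB's gen1 so the query is re-issued once it completes; the response is handled in qla2x00_async_gpnid_sp_done().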
*/ 3169 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) 3170 { 3171 int rval = QLA_FUNCTION_FAILED; 3172 struct ct_sns_req *ct_req; 3173 srb_t *sp, *tsp; 3174 struct ct_sns_pkt *ct_sns; 3175 unsigned long flags; 3176 3177 if (!vha->flags.online) 3178 goto done; 3179 3180 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 3181 if (!sp) 3182 goto done; 3183 3184 sp->type = SRB_CT_PTHRU_CMD; 3185 sp->name = "gpnid"; 3186 sp->u.iocb_cmd.u.ctarg.id = *id; 3187 sp->gen1 = 0; 3188 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 3189 3190 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3191 list_for_each_entry(tsp, &vha->gpnid_list, elem) { 3192 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { 3193 tsp->gen1++; 3194 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3195 sp->free(sp); 3196 goto done; 3197 } 3198 } 3199 list_add_tail(&sp->elem, &vha->gpnid_list); 3200 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3201 3202 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 3203 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 3204 GFP_KERNEL); 3205 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 3206 if (!sp->u.iocb_cmd.u.ctarg.req) { 3207 ql_log(ql_log_warn, vha, 0xd041, 3208 "Failed to allocate ct_sns request.\n"); 3209 goto done_free_sp; 3210 } 3211 3212 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 3213 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 3214 GFP_KERNEL); 3215 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 3216 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 3217 ql_log(ql_log_warn, vha, 0xd042, 3218 "Failed to allocate ct_sns request.\n"); 3219 goto done_free_sp; 3220 } 3221 3222 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 3223 memset(ct_sns, 0, sizeof(*ct_sns)); 3224 3225 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3226 /* CT_IU preamble */ 3227 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); 3228 3229 /* GPN_ID req */ 3230 ct_req->req.port_id.port_id = port_id_to_be_id(*id); 3231 3232 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; 3233 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; 3234 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3235 3236 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 3237 sp->done = qla2x00_async_gpnid_sp_done; 3238 3239 ql_dbg(ql_dbg_disc, vha, 0x2067, 3240 "Async-%s hdl=%x ID %3phC.\n", sp->name, 3241 sp->handle, &ct_req->req.port_id.port_id); 3242 3243 rval = qla2x00_start_sp(sp); 3244 if (rval != QLA_SUCCESS) 3245 goto done_free_sp; 3246 3247 return rval; 3248 3249 done_free_sp: 3250 spin_lock_irqsave(&vha->hw->vport_slock, flags); 3251 list_del(&sp->elem); 3252 spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 3253 3254 if (sp->u.iocb_cmd.u.ctarg.req) { 3255 dma_free_coherent(&vha->hw->pdev->dev, 3256 sizeof(struct ct_sns_pkt), 3257 sp->u.iocb_cmd.u.ctarg.req, 3258 sp->u.iocb_cmd.u.ctarg.req_dma); 3259 sp->u.iocb_cmd.u.ctarg.req = NULL; 3260 } 3261 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3262 dma_free_coherent(&vha->hw->pdev->dev, 3263 sizeof(struct ct_sns_pkt), 3264 sp->u.iocb_cmd.u.ctarg.rsp, 3265 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3266 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3267 } 3268 3269 sp->free(sp); 3270 done: 3271 return rval; 3272 } 3273 3274 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) 3275 { 3276 fc_port_t *fcport = ea->fcport; 3277 3278 qla24xx_post_gnl_work(vha, fcport); 3279 } 3280 3281 void 
qla24xx_async_gffid_sp_done(srb_t *sp, int res) 3282 { 3283 struct scsi_qla_host *vha = sp->vha; 3284 fc_port_t *fcport = sp->fcport; 3285 struct ct_sns_rsp *ct_rsp; 3286 struct event_arg ea; 3287 uint8_t fc4_scsi_feat; 3288 uint8_t fc4_nvme_feat; 3289 3290 ql_dbg(ql_dbg_disc, vha, 0x2133, 3291 "Async done-%s res %x ID %x. %8phC\n", 3292 sp->name, res, fcport->d_id.b24, fcport->port_name); 3293 3294 fcport->flags &= ~FCF_ASYNC_SENT; 3295 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; 3296 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 3297 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 3298 3299 /* 3300 * FC-GS-7, 5.2.3.12 FC-4 Features - format 3301 * The format of the FC-4 Features object, as defined by the FC-4, 3302 * Shall be an array of 4-bit values, one for each type code value 3303 */ 3304 if (!res) { 3305 if (fc4_scsi_feat & 0xf) { 3306 /* w1 b00:03 */ 3307 fcport->fc4_type = FS_FC4TYPE_FCP; 3308 fcport->fc4_features = fc4_scsi_feat & 0xf; 3309 } 3310 3311 if (fc4_nvme_feat & 0xf) { 3312 /* w5 [00:03]/28h */ 3313 fcport->fc4_type |= FS_FC4TYPE_NVME; 3314 fcport->fc4_features = fc4_nvme_feat & 0xf; 3315 } 3316 } 3317 3318 memset(&ea, 0, sizeof(ea)); 3319 ea.sp = sp; 3320 ea.fcport = sp->fcport; 3321 ea.rc = res; 3322 3323 qla24xx_handle_gffid_event(vha, &ea); 3324 sp->free(sp); 3325 } 3326 3327 /* Get FC4 Feature with Nport ID. */ 3328 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) 3329 { 3330 int rval = QLA_FUNCTION_FAILED; 3331 struct ct_sns_req *ct_req; 3332 srb_t *sp; 3333 3334 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 3335 return rval; 3336 3337 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3338 if (!sp) 3339 return rval; 3340 3341 fcport->flags |= FCF_ASYNC_SENT; 3342 sp->type = SRB_CT_PTHRU_CMD; 3343 sp->name = "gffid"; 3344 sp->gen1 = fcport->rscn_gen; 3345 sp->gen2 = fcport->login_gen; 3346 3347 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 3348 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 3349 3350 /* CT_IU preamble */ 3351 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD, 3352 GFF_ID_RSP_SIZE); 3353 3354 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; 3355 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; 3356 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; 3357 3358 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 3359 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 3360 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 3361 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 3362 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; 3363 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; 3364 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3365 3366 sp->done = qla24xx_async_gffid_sp_done; 3367 3368 ql_dbg(ql_dbg_disc, vha, 0x2132, 3369 "Async-%s hdl=%x %8phC.\n", sp->name, 3370 sp->handle, fcport->port_name); 3371 3372 rval = qla2x00_start_sp(sp); 3373 if (rval != QLA_SUCCESS) 3374 goto done_free_sp; 3375 3376 return rval; 3377 done_free_sp: 3378 sp->free(sp); 3379 fcport->flags &= ~FCF_ASYNC_SENT; 3380 return rval; 3381 } 3382 3383 /* GPN_FT + GNN_FT*/ 3384 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) 3385 { 3386 struct qla_hw_data *ha = vha->hw; 3387 scsi_qla_host_t *vp; 3388 unsigned long flags; 3389 u64 twwn; 3390 int rc = 0; 3391 3392 if (!ha->num_vhosts) 3393 return 0; 3394 3395 spin_lock_irqsave(&ha->vport_slock, flags); 3396 list_for_each_entry(vp, &ha->vp_list, list) { 3397 twwn = 
wwn_to_u64(vp->port_name); 3398 if (wwn == twwn) { 3399 rc = 1; 3400 break; 3401 } 3402 } 3403 spin_unlock_irqrestore(&ha->vport_slock, flags); 3404 3405 return rc; 3406 } 3407 3408 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) 3409 { 3410 fc_port_t *fcport; 3411 u32 i, rc; 3412 bool found; 3413 struct fab_scan_rp *rp, *trp; 3414 unsigned long flags; 3415 u8 recheck = 0; 3416 u16 dup = 0, dup_cnt = 0; 3417 3418 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3419 "%s enter\n", __func__); 3420 3421 if (sp->gen1 != vha->hw->base_qpair->chip_reset) { 3422 ql_dbg(ql_dbg_disc, vha, 0xffff, 3423 "%s scan stop due to chip reset %x/%x\n", 3424 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset); 3425 goto out; 3426 } 3427 3428 rc = sp->rc; 3429 if (rc) { 3430 vha->scan.scan_retry++; 3431 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 3432 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3433 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3434 goto out; 3435 } else { 3436 ql_dbg(ql_dbg_disc, vha, 0xffff, 3437 "%s: Fabric scan failed for %d retries.\n", 3438 __func__, vha->scan.scan_retry); 3439 /* 3440 * Unable to scan any rports. logout loop below 3441 * will unregister all sessions. 3442 */ 3443 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3444 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { 3445 fcport->scan_state = QLA_FCPORT_SCAN; 3446 } 3447 } 3448 goto login_logout; 3449 } 3450 } 3451 vha->scan.scan_retry = 0; 3452 3453 list_for_each_entry(fcport, &vha->vp_fcports, list) 3454 fcport->scan_state = QLA_FCPORT_SCAN; 3455 3456 for (i = 0; i < vha->hw->max_fibre_devices; i++) { 3457 u64 wwn; 3458 int k; 3459 3460 rp = &vha->scan.l[i]; 3461 found = false; 3462 3463 wwn = wwn_to_u64(rp->port_name); 3464 if (wwn == 0) 3465 continue; 3466 3467 /* Remove duplicate NPORT ID entries from switch data base */ 3468 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { 3469 trp = &vha->scan.l[k]; 3470 if (rp->id.b24 == trp->id.b24) { 3471 dup = 1; 3472 dup_cnt++; 3473 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 3474 vha, 0xffff, 3475 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", 3476 rp->id.b24, rp->port_name, trp->port_name); 3477 memset(trp, 0, sizeof(*trp)); 3478 } 3479 } 3480 3481 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) 3482 continue; 3483 3484 /* Bypass reserved domain fields. */ 3485 if ((rp->id.b.domain & 0xf0) == 0xf0) 3486 continue; 3487 3488 /* Bypass virtual ports of the same host. */ 3489 if (qla2x00_is_a_vp(vha, wwn)) 3490 continue; 3491 3492 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3493 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) 3494 continue; 3495 fcport->scan_state = QLA_FCPORT_FOUND; 3496 fcport->last_rscn_gen = fcport->rscn_gen; 3497 found = true; 3498 /* 3499 * If device was not a fabric device before. 
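 * (clear any stale loop ID and mark it as a fabric device). If, instead,
 * its N_Port ID has changed, the old session is scheduled for deletion so
 * the port can log in again at its new address.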
3500 */ 3501 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3502 qla2x00_clear_loop_id(fcport); 3503 fcport->flags |= FCF_FABRIC_DEVICE; 3504 } else if (fcport->d_id.b24 != rp->id.b24 || 3505 (fcport->scan_needed && 3506 fcport->port_type != FCT_INITIATOR && 3507 fcport->port_type != FCT_NVME_INITIATOR)) { 3508 qlt_schedule_sess_for_deletion(fcport); 3509 } 3510 fcport->d_id.b24 = rp->id.b24; 3511 fcport->scan_needed = 0; 3512 break; 3513 } 3514 3515 if (!found) { 3516 ql_dbg(ql_dbg_disc, vha, 0xffff, 3517 "%s %d %8phC post new sess\n", 3518 __func__, __LINE__, rp->port_name); 3519 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name, 3520 rp->node_name, NULL, rp->fc4type); 3521 } 3522 } 3523 3524 if (dup) { 3525 ql_log(ql_log_warn, vha, 0xffff, 3526 "Detected %d duplicate NPORT ID(s) from switch data base\n", 3527 dup_cnt); 3528 } 3529 3530 login_logout: 3531 /* 3532 * Logout all previous fabric dev marked lost, except FCP2 devices. 3533 */ 3534 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3535 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3536 fcport->scan_needed = 0; 3537 continue; 3538 } 3539 3540 if (fcport->scan_state != QLA_FCPORT_FOUND) { 3541 bool do_delete = false; 3542 3543 if (fcport->scan_needed && 3544 fcport->disc_state == DSC_LOGIN_PEND) { 3545 /* Cable got disconnected after we sent 3546 * a login. Do delete to prevent timeout. 3547 */ 3548 fcport->logout_on_delete = 1; 3549 do_delete = true; 3550 } 3551 3552 fcport->scan_needed = 0; 3553 if (((qla_dual_mode_enabled(vha) || 3554 qla_ini_mode_enabled(vha)) && 3555 atomic_read(&fcport->state) == FCS_ONLINE) || 3556 do_delete) { 3557 if (fcport->loop_id != FC_NO_LOOP_ID) { 3558 if (fcport->flags & FCF_FCP2_DEVICE) 3559 fcport->logout_on_delete = 0; 3560 3561 ql_dbg(ql_dbg_disc, vha, 0x20f0, 3562 "%s %d %8phC post del sess\n", 3563 __func__, __LINE__, 3564 fcport->port_name); 3565 3566 qlt_schedule_sess_for_deletion(fcport); 3567 continue; 3568 } 3569 } 3570 } else { 3571 if (fcport->scan_needed || 3572 fcport->disc_state != DSC_LOGIN_COMPLETE) { 3573 if (fcport->login_retry == 0) { 3574 fcport->login_retry = 3575 vha->hw->login_retry_count; 3576 ql_dbg(ql_dbg_disc, vha, 0x20a3, 3577 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", 3578 fcport->port_name, fcport->loop_id, 3579 fcport->login_retry); 3580 } 3581 fcport->scan_needed = 0; 3582 qla24xx_fcport_handle_login(vha, fcport); 3583 } 3584 } 3585 } 3586 3587 recheck = 1; 3588 out: 3589 qla24xx_sp_unmap(vha, sp); 3590 spin_lock_irqsave(&vha->work_lock, flags); 3591 vha->scan.scan_flags &= ~SF_SCANNING; 3592 spin_unlock_irqrestore(&vha->work_lock, flags); 3593 3594 if (recheck) { 3595 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3596 if (fcport->scan_needed) { 3597 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3598 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3599 break; 3600 } 3601 } 3602 } 3603 } 3604 3605 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, 3606 srb_t *sp, int cmd) 3607 { 3608 struct qla_work_evt *e; 3609 3610 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) 3611 return QLA_PARAMETER_ERROR; 3612 3613 e = qla2x00_alloc_work(vha, cmd); 3614 if (!e) 3615 return QLA_FUNCTION_FAILED; 3616 3617 e->u.iosb.sp = sp; 3618 3619 return qla2x00_post_work(vha, e); 3620 } 3621 3622 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, 3623 srb_t *sp, int cmd) 3624 { 3625 struct qla_work_evt *e; 3626 3627 if (cmd != QLA_EVT_GPNFT) 3628 return QLA_PARAMETER_ERROR; 3629 3630 e = qla2x00_alloc_work(vha, 
cmd); 3631 if (!e) 3632 return QLA_FUNCTION_FAILED; 3633 3634 e->u.gpnft.fc4_type = FC4_TYPE_NVME; 3635 e->u.gpnft.sp = sp; 3636 3637 return qla2x00_post_work(vha, e); 3638 } 3639 3640 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, 3641 struct srb *sp) 3642 { 3643 struct qla_hw_data *ha = vha->hw; 3644 int num_fibre_dev = ha->max_fibre_devices; 3645 struct ct_sns_req *ct_req = 3646 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3647 struct ct_sns_gpnft_rsp *ct_rsp = 3648 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3649 struct ct_sns_gpn_ft_data *d; 3650 struct fab_scan_rp *rp; 3651 u16 cmd = be16_to_cpu(ct_req->command); 3652 u8 fc4_type = sp->gen2; 3653 int i, j, k; 3654 port_id_t id; 3655 u8 found; 3656 u64 wwn; 3657 3658 j = 0; 3659 for (i = 0; i < num_fibre_dev; i++) { 3660 d = &ct_rsp->entries[i]; 3661 3662 id.b.rsvd_1 = 0; 3663 id.b.domain = d->port_id[0]; 3664 id.b.area = d->port_id[1]; 3665 id.b.al_pa = d->port_id[2]; 3666 wwn = wwn_to_u64(d->port_name); 3667 3668 if (id.b24 == 0 || wwn == 0) 3669 continue; 3670 3671 if (fc4_type == FC4_TYPE_FCP_SCSI) { 3672 if (cmd == GPN_FT_CMD) { 3673 rp = &vha->scan.l[j]; 3674 rp->id = id; 3675 memcpy(rp->port_name, d->port_name, 8); 3676 j++; 3677 rp->fc4type = FS_FC4TYPE_FCP; 3678 } else { 3679 for (k = 0; k < num_fibre_dev; k++) { 3680 rp = &vha->scan.l[k]; 3681 if (id.b24 == rp->id.b24) { 3682 memcpy(rp->node_name, 3683 d->port_name, 8); 3684 break; 3685 } 3686 } 3687 } 3688 } else { 3689 /* Search if the fibre device supports FC4_TYPE_NVME */ 3690 if (cmd == GPN_FT_CMD) { 3691 found = 0; 3692 3693 for (k = 0; k < num_fibre_dev; k++) { 3694 rp = &vha->scan.l[k]; 3695 if (!memcmp(rp->port_name, 3696 d->port_name, 8)) { 3697 /* 3698 * Supports FC-NVMe & FCP 3699 */ 3700 rp->fc4type |= FS_FC4TYPE_NVME; 3701 found = 1; 3702 break; 3703 } 3704 } 3705 3706 /* We found new FC-NVMe only port */ 3707 if (!found) { 3708 for (k = 0; k < num_fibre_dev; k++) { 3709 rp = &vha->scan.l[k]; 3710 if (wwn_to_u64(rp->port_name)) { 3711 continue; 3712 } else { 3713 rp->id = id; 3714 memcpy(rp->port_name, 3715 d->port_name, 8); 3716 rp->fc4type = 3717 FS_FC4TYPE_NVME; 3718 break; 3719 } 3720 } 3721 } 3722 } else { 3723 for (k = 0; k < num_fibre_dev; k++) { 3724 rp = &vha->scan.l[k]; 3725 if (id.b24 == rp->id.b24) { 3726 memcpy(rp->node_name, 3727 d->port_name, 8); 3728 break; 3729 } 3730 } 3731 } 3732 } 3733 } 3734 } 3735 3736 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) 3737 { 3738 struct scsi_qla_host *vha = sp->vha; 3739 struct ct_sns_req *ct_req = 3740 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3741 u16 cmd = be16_to_cpu(ct_req->command); 3742 u8 fc4_type = sp->gen2; 3743 unsigned long flags; 3744 int rc; 3745 3746 /* gen2 field is holding the fc4type */ 3747 ql_dbg(ql_dbg_disc, vha, 0xffff, 3748 "Async done-%s res %x FC4Type %x\n", 3749 sp->name, res, sp->gen2); 3750 3751 del_timer(&sp->u.iocb_cmd.timer); 3752 sp->rc = res; 3753 if (res) { 3754 unsigned long flags; 3755 const char *name = sp->name; 3756 3757 if (res == QLA_OS_TIMER_EXPIRED) { 3758 /* switch is ignoring all commands. 3759 * This might be a zone disable behavior. 3760 * This means we hit 64s timeout. 3761 * 22s GPNFT + 44s Abort = 64s 3762 */ 3763 ql_dbg(ql_dbg_disc, vha, 0xffff, 3764 "%s: Switch Zone check please .\n", 3765 name); 3766 qla2x00_mark_all_devices_lost(vha); 3767 } 3768 3769 /* 3770 * We are in an Interrupt context, queue up this 3771 * sp for GNNFT_DONE work. This will allow all 3772 * the resource to get freed up. 
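 * (Among other things, the CT request/response buffers are DMA-coherent
 * allocations, which generally must not be freed from interrupt context.)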
static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct ct_sns_req *ct_req =
		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
	u16 cmd = be16_to_cpu(ct_req->command);
	u8 fc4_type = sp->gen2;
	unsigned long flags;
	int rc;

	/* The gen2 field holds the FC-4 type. */
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x FC4Type %x\n",
	    sp->name, res, sp->gen2);

	del_timer(&sp->u.iocb_cmd.timer);
	sp->rc = res;
	if (res) {
		const char *name = sp->name;

		if (res == QLA_OS_TIMER_EXPIRED) {
			/*
			 * The switch is ignoring all commands; this might
			 * be zone-disable behavior.  We have hit the 64s
			 * timeout (22s GPNFT + 44s abort).
			 */
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: Switch Zone check please.\n",
			    name);
			qla2x00_mark_all_devices_lost(vha);
		}

		/*
		 * We are in interrupt context; queue this sp up for
		 * GNNFT_DONE work so that all of its resources get freed.
		 */
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GNNFT_DONE);
		if (rc) {
			/* Clean up here to prevent a memory leak. */
			qla24xx_sp_unmap(vha, sp);

			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			vha->scan.scan_retry++;
			spin_unlock_irqrestore(&vha->work_lock, flags);

			if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "Async done-%s rescan failed on all retries.\n",
				    name);
			}
		}
		return;
	}

	qla2x00_find_free_fcp_nvme_slot(vha, sp);

	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
	    cmd == GNN_FT_CMD) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		sp->rc = res;
		rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
		if (rc) {
			qla24xx_sp_unmap(vha, sp);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
		return;
	}

	if (cmd == GPN_FT_CMD) {
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GPNFT_DONE);
	} else {
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GNNFT_DONE);
	}

	if (rc) {
		qla24xx_sp_unmap(vha, sp);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return;
	}
}

/*
 * Get the WWNN list for fc4_type.
 *
 * It is assumed that the same SRB is re-used from GPNFT to avoid
 * mem free & re-alloc.
 */
static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
    u8 fc4_type)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	unsigned long flags;

	if (!vha->flags.online) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		goto done_free_sp;
	}

	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: req %p rsp %p are not setup\n",
		    __func__, sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.rsp);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		WARN_ON(1);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
	    sp->u.iocb_cmd.u.ctarg.req_size);

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
	    sp->u.iocb_cmd.u.ctarg.rsp_size);

	/* GNN_FT req: reuses the GPN_FT request layout (one FC-4 TYPE byte). */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
} /* GNNFT */

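/*
 * Completion chaining used by the async fabric scan, for reference:
 *
 *	GPN_FT done      -> QLA_EVT_GPNFT_DONE -> qla24xx_async_gpnft_done()
 *	                    (re-uses the same SRB to issue GNN_FT, above)
 *	GNN_FT(FCP) done -> QLA_EVT_GPNFT when FC-NVMe is enabled, which
 *	                    starts a second GPN_FT pass for FC4_TYPE_NVME
 *	GNN_FT(last) done-> QLA_EVT_GNNFT_DONE, whose handler (elsewhere in
 *	                    this file) walks vha->scan.l[] and updates the
 *	                    fcport database.
 *
 * If posting a work event fails, the sp is unmapped on the spot and a
 * loop resync is requested so the scan is retried from the DPC thread.
 */
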
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);
	qla24xx_async_gnnft(vha, sp, sp->gen2);
}

/* Get the WWPN list for a certain fc4_type. */
int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	u32 rspsz;
	unsigned long flags;

	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);

	if (!vha->flags.online)
		return rval;

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags & SF_SCANNING) {
		spin_unlock_irqrestore(&vha->work_lock, flags);
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: scan active\n", __func__);
		return rval;
	}
	vha->scan.scan_flags |= SF_SCANNING;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (fc4_type == FC4_TYPE_FCP_SCSI) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Performing FCP Scan\n", __func__);

		if (sp)
			sp->free(sp); /* should not happen */

		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
		if (!sp) {
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			return rval;
		}

		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    &sp->u.iocb_cmd.u.ctarg.req_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
		if (!sp->u.iocb_cmd.u.ctarg.req) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns request.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;

		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
			((vha->hw->max_fibre_devices - 1) *
			    sizeof(struct ct_sns_gpn_ft_data));

		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
		    rspsz,
		    &sp->u.iocb_cmd.u.ctarg.rsp_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns response.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			dma_free_coherent(&vha->hw->pdev->dev,
			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
			    sp->u.iocb_cmd.u.ctarg.req,
			    sp->u.iocb_cmd.u.ctarg.req_dma);
			sp->u.iocb_cmd.u.ctarg.req = NULL;
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s scan list size %d\n", __func__, vha->scan.size);

		memset(vha->scan.l, 0, vha->scan.size);
	} else if (!sp) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "NVME scan did not provide SP\n");
		return rval;
	}

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gpnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);

	/* GPN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Scan scheduled.\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
}

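/*
 * Illustrative only: the FCP pass is normally kicked off with a NULL sp
 * (the routine above then allocates the SRB and its CT DMA buffers
 * itself), typically from the loop-resync/rescan path, along the lines
 * of:
 *
 *	if (qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, NULL) != QLA_SUCCESS)
 *		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 *
 * The FC-NVMe pass, by contrast, arrives through QLA_EVT_GPNFT with the
 * already-populated sp carried over from the preceding GNN_FT completion.
 */
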
void qla_scan_work_fn(struct work_struct *work)
{
	struct fab_scan *s = container_of(to_delayed_work(work),
	    struct fab_scan, scan_work);
	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
	    scan);
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: schedule loop resync\n", __func__);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_QUEUED;
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

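/*
 * The done_free_sp paths above schedule this handler with a small
 * 5-jiffy delay whenever a scan could not be started, with SF_QUEUED
 * guarding against queueing it twice.  All the handler does is request
 * a loop resync, which retries the fabric scan from the DPC thread.
 *
 * scan_work is assumed to have been wired up once at host allocation,
 * along the lines of:
 *
 *	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
 */
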
/* GNN_ID */
void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	qla24xx_post_gnl_work(vha, ea->fcport);
}

static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
	struct event_arg ea;
	u64 wwnn;

	fcport->flags &= ~FCF_ASYNC_SENT;
	wwnn = wwn_to_u64(node_name);
	if (wwnn)
		memcpy(fcport->node_name, node_name, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->node_name);

	qla24xx_handle_gnnid_event(vha, &ea);

	sp->free(sp);
}

int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
	    GNN_ID_RSP_SIZE);

	/* GNN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gnnid_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

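/*
 * Unlike the fabric-wide GPN_FT/GNN_FT scan above, GNN_ID (and GFPN_ID
 * below) are single-port queries: they use the per-fcport
 * ct_desc.ct_sns DMA buffer for both the CT request and the CT
 * response, so nothing is allocated or freed per query.  Callers go
 * through the work-queue wrapper rather than issuing the IOCB directly,
 * e.g. (illustrative):
 *
 *	if (qla24xx_post_gnnid_work(vha, fcport) == QLA_FUNCTION_FAILED)
 *		(fall back to a full loop resync)
 */
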
/* GFPN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* The target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		return;
	}

	qla24xx_post_gpsc_work(vha, fcport);
}

static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
	struct event_arg ea;
	u64 wwn;

	wwn = wwn_to_u64(fpn);
	if (wwn)
		memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->fabric_port_name);

	qla24xx_handle_gfpnid_event(vha, &ea);

	sp->free(sp);
}

int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gfpnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
	    GFPN_ID_RSP_SIZE);

	/* GFPN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gfpnid_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

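/*
 * Both single-port queries snapshot the fcport generation counters in
 * the SRB before the CT passthrough is started (gen1 = rscn_gen,
 * gen2 = login_gen).  The GFPN_ID event handler above compares those
 * snapshots with the current counters and silently drops completions
 * that raced with an RSCN or a relogin, so only fresh fabric port name
 * data is followed up with a GPSC (port speed) query.
 */
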
int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}