// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include <linux/utsname.h>

static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
static int qla_async_rsnn_nn(scsi_qla_host_t *);



/**
 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
 * @vha: HA context
 * @arg: CT arguments
 *
 * Returns a pointer to the @vha's ms_iocb.
 */
void *
qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt;

	ms_pkt = (ms_iocb_entry_t *)arg->iocb;
	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));

	ms_pkt->entry_type = MS_IOCB_TYPE;
	ms_pkt->entry_count = 1;
	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
	ms_pkt->total_dsd_count = cpu_to_le16(2);
	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);

	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;

	put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;

	vha->qla_stats.control_requests++;

	return (ms_pkt);
}

/**
 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
 * @vha: HA context
 * @arg: CT arguments
 *
 * Returns a pointer to the @ha's ms_iocb.
 */
void *
qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
	struct qla_hw_data *ha = vha->hw;
	struct ct_entry_24xx *ct_pkt;

	ct_pkt = (struct ct_entry_24xx *)arg->iocb;
	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));

	ct_pkt->entry_type = CT_IOCB_TYPE;
	ct_pkt->entry_count = 1;
	ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);

	put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;

	put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
	ct_pkt->vp_index = vha->vp_idx;

	vha->qla_stats.control_requests++;

	return (ct_pkt);
}

/**
 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
 * @p: CT request buffer
 * @cmd: GS command
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the initialized @ct_req.
 */
static inline struct ct_sns_req *
qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
{
	memset(p, 0, sizeof(struct ct_sns_pkt));

	p->p.req.header.revision = 0x01;
	p->p.req.header.gs_type = 0xFC;
	p->p.req.header.gs_subtype = 0x02;
	p->p.req.command = cpu_to_be16(cmd);
	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);

	return &p->p.req;
}

int
qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
    struct ct_sns_rsp *ct_rsp, const char *routine)
{
	int rval;
	uint16_t comp_status;
	struct qla_hw_data *ha = vha->hw;
	bool lid_is_sns = false;

	rval = QLA_FUNCTION_FAILED;
	if (ms_pkt->entry_status != 0) {
		ql_dbg(ql_dbg_disc, vha, 0x2031,
		    "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
		    routine, ms_pkt->entry_status, vha->d_id.b.domain,
		    vha->d_id.b.area, vha->d_id.b.al_pa);
	} else {
		if (IS_FWI2_CAPABLE(ha))
			comp_status = le16_to_cpu(
			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
		else
			comp_status = le16_to_cpu(ms_pkt->status);
		switch (comp_status) {
		case CS_COMPLETE:
		case CS_DATA_UNDERRUN:
		case CS_DATA_OVERRUN:		/* Overrun? */
			if (ct_rsp->header.response !=
			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
				    "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
				    routine, vha->d_id.b.domain,
				    vha->d_id.b.area, vha->d_id.b.al_pa,
				    comp_status, ct_rsp->header.response);
				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
				    0x2078, ct_rsp,
				    offsetof(typeof(*ct_rsp), rsp));
				rval = QLA_INVALID_COMMAND;
			} else
				rval = QLA_SUCCESS;
			break;
		case CS_PORT_LOGGED_OUT:
			if (IS_FWI2_CAPABLE(ha)) {
				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
				    NPH_SNS)
					lid_is_sns = true;
			} else {
				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
				    SIMPLE_NAME_SERVER)
					lid_is_sns = true;
			}
			if (lid_is_sns) {
				ql_dbg(ql_dbg_async, vha, 0x502b,
				    "%s failed, Name server has logged out",
				    routine);
				rval = QLA_NOT_LOGGED_IN;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			}
			break;
		case CS_TIMEOUT:
			rval = QLA_FUNCTION_TIMEOUT;
			fallthrough;
		default:
			ql_dbg(ql_dbg_disc, vha, 0x2033,
			    "%s failed, completion status (%x) on port_id: "
			    "%02x%02x%02x.\n", routine, comp_status,
			    vha->d_id.b.domain, vha->d_id.b.area,
			    vha->d_id.b.al_pa);
			break;
		}
	}
	return rval;
}

/**
 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 * @vha: HA context
 * @fcport: fcport entry to be updated
 *
 * Returns 0 on success.
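 *
 * NOTE: GA_NXT (Get All Next) asks the name server for the record of the
 * port whose ID follows the one passed in; the node name, port name and
 * FC-4 type bits of that next port are copied back into @fcport, and
 * @fcport->d_id is overwritten with the returned ID.  Repeated calls
 * therefore walk the fabric one entry at a time, which is the fallback
 * used when a single GID_PT response cannot describe every device.
 *
 * Rough, illustrative caller loop (not the driver's actual scan logic;
 * assumes the name server eventually wraps around to the first ID seen):
 *
 *	port_id_t first = {};
 *
 *	do {
 *		if (qla2x00_ga_nxt(vha, fcport) != QLA_SUCCESS)
 *			break;
 *		if (!first.b24)
 *			first = fcport->d_id;		// remember first hit
 *		else if (fcport->d_id.b24 == first.b24)
 *			break;				// wrapped around
 *	} while (1);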
198 */ 199 int 200 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) 201 { 202 int rval; 203 204 ms_iocb_entry_t *ms_pkt; 205 struct ct_sns_req *ct_req; 206 struct ct_sns_rsp *ct_rsp; 207 struct qla_hw_data *ha = vha->hw; 208 struct ct_arg arg; 209 210 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 211 return qla2x00_sns_ga_nxt(vha, fcport); 212 213 arg.iocb = ha->ms_iocb; 214 arg.req_dma = ha->ct_sns_dma; 215 arg.rsp_dma = ha->ct_sns_dma; 216 arg.req_size = GA_NXT_REQ_SIZE; 217 arg.rsp_size = GA_NXT_RSP_SIZE; 218 arg.nport_handle = NPH_SNS; 219 220 /* Issue GA_NXT */ 221 /* Prepare common MS IOCB */ 222 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 223 224 /* Prepare CT request */ 225 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, 226 GA_NXT_RSP_SIZE); 227 ct_rsp = &ha->ct_sns->p.rsp; 228 229 /* Prepare CT arguments -- port_id */ 230 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); 231 232 /* Execute MS IOCB */ 233 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 234 sizeof(ms_iocb_entry_t)); 235 if (rval != QLA_SUCCESS) { 236 /*EMPTY*/ 237 ql_dbg(ql_dbg_disc, vha, 0x2062, 238 "GA_NXT issue IOCB failed (%d).\n", rval); 239 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 240 QLA_SUCCESS) { 241 rval = QLA_FUNCTION_FAILED; 242 } else { 243 /* Populate fc_port_t entry. */ 244 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); 245 246 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, 247 WWN_SIZE); 248 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, 249 WWN_SIZE); 250 251 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? 252 FS_FC4TYPE_FCP : FC4_TYPE_OTHER; 253 254 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && 255 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 256 fcport->d_id.b.domain = 0xf0; 257 258 ql_dbg(ql_dbg_disc, vha, 0x2063, 259 "GA_NXT entry - nn %8phN pn %8phN " 260 "port_id=%02x%02x%02x.\n", 261 fcport->node_name, fcport->port_name, 262 fcport->d_id.b.domain, fcport->d_id.b.area, 263 fcport->d_id.b.al_pa); 264 } 265 266 return (rval); 267 } 268 269 static inline int 270 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) 271 { 272 return vha->hw->max_fibre_devices * 4 + 16; 273 } 274 275 /** 276 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. 277 * @vha: HA context 278 * @list: switch info entries to populate 279 * 280 * NOTE: Non-Nx_Ports are not requested. 281 * 282 * Returns 0 on success. 
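 *
 * NOTE: the response buffer is sized by qla2x00_gid_pt_rsp_size(): a 16-byte
 * CT header plus one 4-byte entry (control byte + 3-byte port ID) per
 * supported device, so e.g. a 2048-device limit needs 2048 * 4 + 16 = 8208
 * bytes.  The name server flags the final entry by setting BIT_7 in its
 * control byte; that byte is stashed in list[i].d_id.b.rsvd_1 so the
 * follow-up GPN_ID/GNN_ID loops know where the list ends.  If every slot is
 * consumed without seeing the flag, the routine fails so that discovery can
 * fall back to GA_NXT.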
283 */ 284 int 285 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) 286 { 287 int rval; 288 uint16_t i; 289 290 ms_iocb_entry_t *ms_pkt; 291 struct ct_sns_req *ct_req; 292 struct ct_sns_rsp *ct_rsp; 293 294 struct ct_sns_gid_pt_data *gid_data; 295 struct qla_hw_data *ha = vha->hw; 296 uint16_t gid_pt_rsp_size; 297 struct ct_arg arg; 298 299 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 300 return qla2x00_sns_gid_pt(vha, list); 301 302 gid_data = NULL; 303 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); 304 305 arg.iocb = ha->ms_iocb; 306 arg.req_dma = ha->ct_sns_dma; 307 arg.rsp_dma = ha->ct_sns_dma; 308 arg.req_size = GID_PT_REQ_SIZE; 309 arg.rsp_size = gid_pt_rsp_size; 310 arg.nport_handle = NPH_SNS; 311 312 /* Issue GID_PT */ 313 /* Prepare common MS IOCB */ 314 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 315 316 /* Prepare CT request */ 317 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); 318 ct_rsp = &ha->ct_sns->p.rsp; 319 320 /* Prepare CT arguments -- port_type */ 321 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; 322 323 /* Execute MS IOCB */ 324 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 325 sizeof(ms_iocb_entry_t)); 326 if (rval != QLA_SUCCESS) { 327 /*EMPTY*/ 328 ql_dbg(ql_dbg_disc, vha, 0x2055, 329 "GID_PT issue IOCB failed (%d).\n", rval); 330 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 331 QLA_SUCCESS) { 332 rval = QLA_FUNCTION_FAILED; 333 } else { 334 /* Set port IDs in switch info list. */ 335 for (i = 0; i < ha->max_fibre_devices; i++) { 336 gid_data = &ct_rsp->rsp.gid_pt.entries[i]; 337 list[i].d_id = be_to_port_id(gid_data->port_id); 338 memset(list[i].fabric_port_name, 0, WWN_SIZE); 339 list[i].fp_speed = PORT_SPEED_UNKNOWN; 340 341 /* Last one exit. */ 342 if (gid_data->control_byte & BIT_7) { 343 list[i].d_id.b.rsvd_1 = gid_data->control_byte; 344 break; 345 } 346 } 347 348 /* 349 * If we've used all available slots, then the switch is 350 * reporting back more devices than we can handle with this 351 * single call. Return a failed status, and let GA_NXT handle 352 * the overload. 353 */ 354 if (i == ha->max_fibre_devices) 355 rval = QLA_FUNCTION_FAILED; 356 } 357 358 return (rval); 359 } 360 361 /** 362 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. 363 * @vha: HA context 364 * @list: switch info entries to populate 365 * 366 * Returns 0 on success. 
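 *
 * NOTE: @list is expected to have been populated by a preceding GID_PT
 * scan.  One GPN_ID query is issued per entry, reusing the same MS IOCB
 * and ct_sns DMA buffer each time, and the loop stops after servicing the
 * entry whose d_id.b.rsvd_1 is non-zero (the last-port marker left behind
 * by GID_PT).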
367 */ 368 int 369 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) 370 { 371 int rval = QLA_SUCCESS; 372 uint16_t i; 373 374 ms_iocb_entry_t *ms_pkt; 375 struct ct_sns_req *ct_req; 376 struct ct_sns_rsp *ct_rsp; 377 struct qla_hw_data *ha = vha->hw; 378 struct ct_arg arg; 379 380 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 381 return qla2x00_sns_gpn_id(vha, list); 382 383 arg.iocb = ha->ms_iocb; 384 arg.req_dma = ha->ct_sns_dma; 385 arg.rsp_dma = ha->ct_sns_dma; 386 arg.req_size = GPN_ID_REQ_SIZE; 387 arg.rsp_size = GPN_ID_RSP_SIZE; 388 arg.nport_handle = NPH_SNS; 389 390 for (i = 0; i < ha->max_fibre_devices; i++) { 391 /* Issue GPN_ID */ 392 /* Prepare common MS IOCB */ 393 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 394 395 /* Prepare CT request */ 396 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, 397 GPN_ID_RSP_SIZE); 398 ct_rsp = &ha->ct_sns->p.rsp; 399 400 /* Prepare CT arguments -- port_id */ 401 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 402 403 /* Execute MS IOCB */ 404 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 405 sizeof(ms_iocb_entry_t)); 406 if (rval != QLA_SUCCESS) { 407 /*EMPTY*/ 408 ql_dbg(ql_dbg_disc, vha, 0x2056, 409 "GPN_ID issue IOCB failed (%d).\n", rval); 410 break; 411 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 412 "GPN_ID") != QLA_SUCCESS) { 413 rval = QLA_FUNCTION_FAILED; 414 break; 415 } else { 416 /* Save portname */ 417 memcpy(list[i].port_name, 418 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 419 } 420 421 /* Last device exit. */ 422 if (list[i].d_id.b.rsvd_1 != 0) 423 break; 424 } 425 426 return (rval); 427 } 428 429 /** 430 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. 431 * @vha: HA context 432 * @list: switch info entries to populate 433 * 434 * Returns 0 on success. 435 */ 436 int 437 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) 438 { 439 int rval = QLA_SUCCESS; 440 uint16_t i; 441 struct qla_hw_data *ha = vha->hw; 442 ms_iocb_entry_t *ms_pkt; 443 struct ct_sns_req *ct_req; 444 struct ct_sns_rsp *ct_rsp; 445 struct ct_arg arg; 446 447 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 448 return qla2x00_sns_gnn_id(vha, list); 449 450 arg.iocb = ha->ms_iocb; 451 arg.req_dma = ha->ct_sns_dma; 452 arg.rsp_dma = ha->ct_sns_dma; 453 arg.req_size = GNN_ID_REQ_SIZE; 454 arg.rsp_size = GNN_ID_RSP_SIZE; 455 arg.nport_handle = NPH_SNS; 456 457 for (i = 0; i < ha->max_fibre_devices; i++) { 458 /* Issue GNN_ID */ 459 /* Prepare common MS IOCB */ 460 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 461 462 /* Prepare CT request */ 463 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, 464 GNN_ID_RSP_SIZE); 465 ct_rsp = &ha->ct_sns->p.rsp; 466 467 /* Prepare CT arguments -- port_id */ 468 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 469 470 /* Execute MS IOCB */ 471 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 472 sizeof(ms_iocb_entry_t)); 473 if (rval != QLA_SUCCESS) { 474 /*EMPTY*/ 475 ql_dbg(ql_dbg_disc, vha, 0x2057, 476 "GNN_ID issue IOCB failed (%d).\n", rval); 477 break; 478 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 479 "GNN_ID") != QLA_SUCCESS) { 480 rval = QLA_FUNCTION_FAILED; 481 break; 482 } else { 483 /* Save nodename */ 484 memcpy(list[i].node_name, 485 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 486 487 ql_dbg(ql_dbg_disc, vha, 0x2058, 488 "GID_PT entry - nn %8phN pn %8phN " 489 "portid=%02x%02x%02x.\n", 490 list[i].node_name, list[i].port_name, 491 list[i].d_id.b.domain, list[i].d_id.b.area, 492 list[i].d_id.b.al_pa); 493 } 494 495 /* Last device exit. 
*/ 496 if (list[i].d_id.b.rsvd_1 != 0) 497 break; 498 } 499 500 return (rval); 501 } 502 503 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) 504 { 505 struct scsi_qla_host *vha = sp->vha; 506 struct ct_sns_pkt *ct_sns; 507 struct qla_work_evt *e; 508 509 sp->rc = rc; 510 if (rc == QLA_SUCCESS) { 511 ql_dbg(ql_dbg_disc, vha, 0x204f, 512 "Async done-%s exiting normally.\n", 513 sp->name); 514 } else if (rc == QLA_FUNCTION_TIMEOUT) { 515 ql_dbg(ql_dbg_disc, vha, 0x204f, 516 "Async done-%s timeout\n", sp->name); 517 } else { 518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 519 memset(ct_sns, 0, sizeof(*ct_sns)); 520 sp->retry_count++; 521 if (sp->retry_count > 3) 522 goto err; 523 524 ql_dbg(ql_dbg_disc, vha, 0x204f, 525 "Async done-%s fail rc %x. Retry count %d\n", 526 sp->name, rc, sp->retry_count); 527 528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY); 529 if (!e) 530 goto err2; 531 532 e->u.iosb.sp = sp; 533 qla2x00_post_work(vha, e); 534 return; 535 } 536 537 err: 538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 539 err2: 540 if (!e) { 541 /* please ignore kernel warning. otherwise, we have mem leak. */ 542 if (sp->u.iocb_cmd.u.ctarg.req) { 543 dma_free_coherent(&vha->hw->pdev->dev, 544 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 545 sp->u.iocb_cmd.u.ctarg.req, 546 sp->u.iocb_cmd.u.ctarg.req_dma); 547 sp->u.iocb_cmd.u.ctarg.req = NULL; 548 } 549 550 if (sp->u.iocb_cmd.u.ctarg.rsp) { 551 dma_free_coherent(&vha->hw->pdev->dev, 552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 553 sp->u.iocb_cmd.u.ctarg.rsp, 554 sp->u.iocb_cmd.u.ctarg.rsp_dma); 555 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 556 } 557 558 /* ref: INIT */ 559 kref_put(&sp->cmd_kref, qla2x00_sp_release); 560 return; 561 } 562 563 e->u.iosb.sp = sp; 564 qla2x00_post_work(vha, e); 565 } 566 567 /** 568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. 569 * @vha: HA context 570 * 571 * Returns 0 on success. 
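 *
 * NOTE: ISP2100/ISP2200 register FC-4 types through the legacy Execute SNS
 * Command mailbox path (qla2x00_sns_rft_id()); later ISPs build an
 * asynchronous CT pass-through SRB in qla_async_rftid().  The async request
 * always sets the SCSI-FCP bit of the FC-4 TYPEs bitmap and, when NVMe is
 * enabled in initiator mode, the FC-NVMe (type 0x28) bit as well.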
572 */ 573 int 574 qla2x00_rft_id(scsi_qla_host_t *vha) 575 { 576 struct qla_hw_data *ha = vha->hw; 577 578 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 579 return qla2x00_sns_rft_id(vha); 580 581 return qla_async_rftid(vha, &vha->d_id); 582 } 583 584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) 585 { 586 int rval = QLA_MEMORY_ALLOC_FAILED; 587 struct ct_sns_req *ct_req; 588 srb_t *sp; 589 struct ct_sns_pkt *ct_sns; 590 591 if (!vha->flags.online) 592 goto done; 593 594 /* ref: INIT */ 595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 596 if (!sp) 597 goto done; 598 599 sp->type = SRB_CT_PTHRU_CMD; 600 sp->name = "rft_id"; 601 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 602 qla2x00_async_sns_sp_done); 603 604 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 605 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 606 GFP_KERNEL); 607 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 608 if (!sp->u.iocb_cmd.u.ctarg.req) { 609 ql_log(ql_log_warn, vha, 0xd041, 610 "%s: Failed to allocate ct_sns request.\n", 611 __func__); 612 goto done_free_sp; 613 } 614 615 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 616 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 617 GFP_KERNEL); 618 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 619 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 620 ql_log(ql_log_warn, vha, 0xd042, 621 "%s: Failed to allocate ct_sns request.\n", 622 __func__); 623 goto done_free_sp; 624 } 625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 626 memset(ct_sns, 0, sizeof(*ct_sns)); 627 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 628 629 /* Prepare CT request */ 630 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); 631 632 /* Prepare CT arguments -- port_id, FC-4 types */ 633 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); 634 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 635 636 if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha)) 637 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ 638 639 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; 640 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; 641 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 642 643 ql_dbg(ql_dbg_disc, vha, 0xffff, 644 "Async-%s - hdl=%x portid %06x.\n", 645 sp->name, sp->handle, d_id->b24); 646 647 rval = qla2x00_start_sp(sp); 648 if (rval != QLA_SUCCESS) { 649 ql_dbg(ql_dbg_disc, vha, 0x2043, 650 "RFT_ID issue IOCB failed (%d).\n", rval); 651 goto done_free_sp; 652 } 653 return rval; 654 done_free_sp: 655 /* ref: INIT */ 656 kref_put(&sp->cmd_kref, qla2x00_sp_release); 657 done: 658 return rval; 659 } 660 661 /** 662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. 663 * @vha: HA context 664 * @type: not used 665 * 666 * Returns 0 on success. 
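 *
 * NOTE: RFF_ID registers the FC-4 Features for a given FC-4 type.  The
 * feature byte handed to qla_async_rffid() comes from qlt_rff_id(), so the
 * name server is told whether this port currently acts as an initiator, a
 * target, or both, according to the qla2xxx target-mode configuration.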
667 */ 668 int 669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) 670 { 671 struct qla_hw_data *ha = vha->hw; 672 673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 674 ql_dbg(ql_dbg_disc, vha, 0x2046, 675 "RFF_ID call not supported on ISP2100/ISP2200.\n"); 676 return (QLA_SUCCESS); 677 } 678 679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); 680 } 681 682 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, 683 u8 fc4feature, u8 fc4type) 684 { 685 int rval = QLA_MEMORY_ALLOC_FAILED; 686 struct ct_sns_req *ct_req; 687 srb_t *sp; 688 struct ct_sns_pkt *ct_sns; 689 690 /* ref: INIT */ 691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 692 if (!sp) 693 goto done; 694 695 sp->type = SRB_CT_PTHRU_CMD; 696 sp->name = "rff_id"; 697 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 698 qla2x00_async_sns_sp_done); 699 700 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 701 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 702 GFP_KERNEL); 703 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 704 if (!sp->u.iocb_cmd.u.ctarg.req) { 705 ql_log(ql_log_warn, vha, 0xd041, 706 "%s: Failed to allocate ct_sns request.\n", 707 __func__); 708 goto done_free_sp; 709 } 710 711 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 712 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 713 GFP_KERNEL); 714 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 715 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 716 ql_log(ql_log_warn, vha, 0xd042, 717 "%s: Failed to allocate ct_sns request.\n", 718 __func__); 719 goto done_free_sp; 720 } 721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 722 memset(ct_sns, 0, sizeof(*ct_sns)); 723 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 724 725 /* Prepare CT request */ 726 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); 727 728 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 729 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); 730 ct_req->req.rff_id.fc4_feature = fc4feature; 731 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ 732 733 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; 734 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; 735 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 736 737 ql_dbg(ql_dbg_disc, vha, 0xffff, 738 "Async-%s - hdl=%x portid %06x feature %x type %x.\n", 739 sp->name, sp->handle, d_id->b24, fc4feature, fc4type); 740 741 rval = qla2x00_start_sp(sp); 742 if (rval != QLA_SUCCESS) { 743 ql_dbg(ql_dbg_disc, vha, 0x2047, 744 "RFF_ID issue IOCB failed (%d).\n", rval); 745 goto done_free_sp; 746 } 747 748 return rval; 749 750 done_free_sp: 751 /* ref: INIT */ 752 kref_put(&sp->cmd_kref, qla2x00_sp_release); 753 done: 754 return rval; 755 } 756 757 /** 758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. 759 * @vha: HA context 760 * 761 * Returns 0 on success. 
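 *
 * NOTE: this associates the HBA's node name (WWNN) with the port ID the
 * fabric assigned to @vha.  The asynchronous variant addresses the name
 * server through the reserved NPH_SNS handle, while ISP2100/ISP2200 parts
 * fall back to the legacy Execute SNS Command mailbox path.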
762 */ 763 int 764 qla2x00_rnn_id(scsi_qla_host_t *vha) 765 { 766 struct qla_hw_data *ha = vha->hw; 767 768 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 769 return qla2x00_sns_rnn_id(vha); 770 771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name); 772 } 773 774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, 775 u8 *node_name) 776 { 777 int rval = QLA_MEMORY_ALLOC_FAILED; 778 struct ct_sns_req *ct_req; 779 srb_t *sp; 780 struct ct_sns_pkt *ct_sns; 781 782 /* ref: INIT */ 783 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 784 if (!sp) 785 goto done; 786 787 sp->type = SRB_CT_PTHRU_CMD; 788 sp->name = "rnid"; 789 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 790 qla2x00_async_sns_sp_done); 791 792 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 793 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 794 GFP_KERNEL); 795 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 796 if (!sp->u.iocb_cmd.u.ctarg.req) { 797 ql_log(ql_log_warn, vha, 0xd041, 798 "%s: Failed to allocate ct_sns request.\n", 799 __func__); 800 goto done_free_sp; 801 } 802 803 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 804 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 805 GFP_KERNEL); 806 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 807 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 808 ql_log(ql_log_warn, vha, 0xd042, 809 "%s: Failed to allocate ct_sns request.\n", 810 __func__); 811 goto done_free_sp; 812 } 813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 814 memset(ct_sns, 0, sizeof(*ct_sns)); 815 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 816 817 /* Prepare CT request */ 818 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); 819 820 /* Prepare CT arguments -- port_id, node_name */ 821 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); 822 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); 823 824 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; 825 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; 826 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 827 828 ql_dbg(ql_dbg_disc, vha, 0xffff, 829 "Async-%s - hdl=%x portid %06x\n", 830 sp->name, sp->handle, d_id->b24); 831 832 rval = qla2x00_start_sp(sp); 833 if (rval != QLA_SUCCESS) { 834 ql_dbg(ql_dbg_disc, vha, 0x204d, 835 "RNN_ID issue IOCB failed (%d).\n", rval); 836 goto done_free_sp; 837 } 838 839 return rval; 840 841 done_free_sp: 842 /* ref: INIT */ 843 kref_put(&sp->cmd_kref, qla2x00_sp_release); 844 done: 845 return rval; 846 } 847 848 size_t 849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) 850 { 851 struct qla_hw_data *ha = vha->hw; 852 853 if (IS_QLAFX00(ha)) 854 return scnprintf(snn, size, "%s FW:v%s DVR:v%s", 855 ha->model_number, ha->mr.fw_version, qla2x00_version_str); 856 857 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", 858 ha->model_number, ha->fw_major_version, ha->fw_minor_version, 859 ha->fw_subminor_version, qla2x00_version_str); 860 } 861 862 /** 863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. 864 * @vha: HA context 865 * 866 * Returns 0 on success. 
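 *
 * NOTE: the symbolic node name is produced by qla2x00_get_sym_node_name()
 * as "<model> FW:v<major>.<minor>.<subminor> DVR:v<driver version>", and
 * the CT request is trimmed to fit it: a 16-byte CT header, the 8-byte node
 * name, a one-byte length field and then only name_len bytes of text, hence
 * the "24 + 1 + name_len" request size computed in qla_async_rsnn_nn().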
867 */ 868 int 869 qla2x00_rsnn_nn(scsi_qla_host_t *vha) 870 { 871 struct qla_hw_data *ha = vha->hw; 872 873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 874 ql_dbg(ql_dbg_disc, vha, 0x2050, 875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); 876 return (QLA_SUCCESS); 877 } 878 879 return qla_async_rsnn_nn(vha); 880 } 881 882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha) 883 { 884 int rval = QLA_MEMORY_ALLOC_FAILED; 885 struct ct_sns_req *ct_req; 886 srb_t *sp; 887 struct ct_sns_pkt *ct_sns; 888 889 /* ref: INIT */ 890 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 891 if (!sp) 892 goto done; 893 894 sp->type = SRB_CT_PTHRU_CMD; 895 sp->name = "rsnn_nn"; 896 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 897 qla2x00_async_sns_sp_done); 898 899 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 900 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 901 GFP_KERNEL); 902 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 903 if (!sp->u.iocb_cmd.u.ctarg.req) { 904 ql_log(ql_log_warn, vha, 0xd041, 905 "%s: Failed to allocate ct_sns request.\n", 906 __func__); 907 goto done_free_sp; 908 } 909 910 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 911 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 912 GFP_KERNEL); 913 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 914 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 915 ql_log(ql_log_warn, vha, 0xd042, 916 "%s: Failed to allocate ct_sns request.\n", 917 __func__); 918 goto done_free_sp; 919 } 920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 921 memset(ct_sns, 0, sizeof(*ct_sns)); 922 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 923 924 /* Prepare CT request */ 925 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); 926 927 /* Prepare CT arguments -- node_name, symbolic node_name, size */ 928 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); 929 930 /* Prepare the Symbolic Node Name */ 931 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, 932 sizeof(ct_req->req.rsnn_nn.sym_node_name)); 933 ct_req->req.rsnn_nn.name_len = 934 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); 935 936 937 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len; 938 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; 939 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 940 941 ql_dbg(ql_dbg_disc, vha, 0xffff, 942 "Async-%s - hdl=%x.\n", 943 sp->name, sp->handle); 944 945 rval = qla2x00_start_sp(sp); 946 if (rval != QLA_SUCCESS) { 947 ql_dbg(ql_dbg_disc, vha, 0x2043, 948 "RFT_ID issue IOCB failed (%d).\n", rval); 949 goto done_free_sp; 950 } 951 952 return rval; 953 954 done_free_sp: 955 /* ref: INIT */ 956 kref_put(&sp->cmd_kref, qla2x00_sp_release); 957 done: 958 return rval; 959 } 960 961 /** 962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query. 963 * @vha: HA context 964 * @cmd: GS command 965 * @scmd_len: Subcommand length 966 * @data_size: response size in bytes 967 * 968 * Returns a pointer to the @ha's sns_cmd. 969 */ 970 static inline struct sns_cmd_pkt * 971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, 972 uint16_t data_size) 973 { 974 uint16_t wc; 975 struct sns_cmd_pkt *sns_cmd; 976 struct qla_hw_data *ha = vha->hw; 977 978 sns_cmd = ha->sns_cmd; 979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); 980 wc = data_size / 2; /* Size in 16bit words. 
 */
	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
	wc = (data_size - 16) / 4;	/* Size in 32bit words. */
	sns_cmd->p.cmd.size = cpu_to_le16(wc);

	vha->qla_stats.control_requests++;

	return (sns_cmd);
}

/**
 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 * @vha: HA context
 * @fcport: fcport entry to be updated
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	struct sns_cmd_pkt *sns_cmd;

	/* Issue GA_NXT. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
	    GA_NXT_SNS_DATA_SIZE);

	/* Prepare SNS command arguments -- port_id. */
	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x205f,
		    "GA_NXT Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
	    sns_cmd->p.gan_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
		    sns_cmd->p.gan_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Populate fc_port_t entry. */
		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];

		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);

		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
			fcport->d_id.b.domain = 0xf0;

		ql_dbg(ql_dbg_disc, vha, 0x2061,
		    "GA_NXT entry - nn %8phN pn %8phN "
		    "port_id=%02x%02x%02x.\n",
		    fcport->node_name, fcport->port_name,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}

	return (rval);
}

/**
 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * NOTE: Non-Nx_Ports are not requested.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	uint8_t *entry;
	struct sns_cmd_pkt *sns_cmd;
	uint16_t gid_pt_sns_data_size;

	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);

	/* Issue GID_PT. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
	    gid_pt_sns_data_size);

	/* Prepare SNS command arguments -- port_type. */
	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x206d,
		    "GID_PT Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
	    sns_cmd->p.gid_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc, vha, 0x202f,
		    "GID_PT failed, rejected request, gid_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
		    sns_cmd->p.gid_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Set port IDs in switch info list. */
		for (i = 0; i < ha->max_fibre_devices; i++) {
			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
			list[i].d_id.b.domain = entry[1];
			list[i].d_id.b.area = entry[2];
			list[i].d_id.b.al_pa = entry[3];

			/* Last one exit. */
			if (entry[0] & BIT_7) {
				list[i].d_id.b.rsvd_1 = entry[0];
				break;
			}
		}

		/*
		 * If we've used all available slots, then the switch is
		 * reporting back more devices than we can handle with this
		 * single call.  Return a failed status, and let GA_NXT handle
		 * the overload.
		 */
		if (i == ha->max_fibre_devices)
			rval = QLA_FUNCTION_FAILED;
	}

	return (rval);
}

/**
 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	struct sns_cmd_pkt *sns_cmd;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		/* Issue GPN_ID */
		/* Prepare SNS command request. */
		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);

		/* Prepare SNS command arguments -- port_id. */
		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;

		/* Execute SNS command. */
		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
		if (rval != QLA_SUCCESS) {
			/*EMPTY*/
			ql_dbg(ql_dbg_disc, vha, 0x2032,
			    "GPN_ID Send SNS failed (%d).\n", rval);
		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
		    sns_cmd->p.gpn_data[9] != 0x02) {
			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
			    "GPN_ID failed, rejected request, gpn_rsp:\n");
			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
			    sns_cmd->p.gpn_data, 16);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Save portname */
			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
			    WWN_SIZE);
		}

		/* Last device exit. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}

	return (rval);
}

/**
 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint16_t i;
	struct sns_cmd_pkt *sns_cmd;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		/* Issue GNN_ID */
		/* Prepare SNS command request. */
		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);

		/* Prepare SNS command arguments -- port_id. */
		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;

		/* Execute SNS command. */
		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
		if (rval != QLA_SUCCESS) {
			/*EMPTY*/
			ql_dbg(ql_dbg_disc, vha, 0x203f,
			    "GNN_ID Send SNS failed (%d).\n", rval);
		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
		    sns_cmd->p.gnn_data[9] != 0x02) {
			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
			    "GNN_ID failed, rejected request, gnn_rsp:\n");
			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
			    sns_cmd->p.gnn_data, 16);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Save nodename */
			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
			    WWN_SIZE);

			ql_dbg(ql_dbg_disc, vha, 0x206e,
			    "GID_PT entry - nn %8phN pn %8phN "
			    "port_id=%02x%02x%02x.\n",
			    list[i].node_name, list[i].port_name,
			    list[i].d_id.b.domain, list[i].d_id.b.area,
			    list[i].d_id.b.al_pa);
		}

		/* Last device exit. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}

	return (rval);
}

/**
 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
 * @vha: HA context
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_sns_rft_id(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct sns_cmd_pkt *sns_cmd;

	/* Issue RFT_ID. */
	/* Prepare SNS command request. */
	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
	    RFT_ID_SNS_DATA_SIZE);

	/* Prepare SNS command arguments -- port_id, FC-4 types */
	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;

	sns_cmd->p.cmd.param[5] = 0x01;		/* FCP-3 */

	/* Execute SNS command. */
	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
	    sizeof(struct sns_cmd_pkt));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x2060,
		    "RFT_ID Send SNS failed (%d).\n", rval);
	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
	    sns_cmd->p.rft_data[9] != 0x02) {
		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
		    "RFT_ID failed, rejected request rft_rsp:\n");
		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
		    sns_cmd->p.rft_data, 16);
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2073,
		    "RFT_ID exiting normally.\n");
	}

	return (rval);
}

/**
 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
 * @vha: HA context
 *
 * This command uses the old Execute SNS Command mailbox routine.
 *
 * Returns 0 on success.
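 *
 * NOTE: as with the other legacy SNS routines above, the response is
 * accepted only when bytes 8-9 of the returned data are 0x80 0x02, i.e. the
 * big-endian CT accept response code (0x8002); anything else is dumped to
 * the trace log and reported as a failure.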
1305 */ 1306 static int 1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha) 1308 { 1309 int rval; 1310 struct qla_hw_data *ha = vha->hw; 1311 struct sns_cmd_pkt *sns_cmd; 1312 1313 /* Issue RNN_ID. */ 1314 /* Prepare SNS command request. */ 1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, 1316 RNN_ID_SNS_DATA_SIZE); 1317 1318 /* Prepare SNS command arguments -- port_id, nodename. */ 1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; 1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area; 1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; 1322 1323 sns_cmd->p.cmd.param[4] = vha->node_name[7]; 1324 sns_cmd->p.cmd.param[5] = vha->node_name[6]; 1325 sns_cmd->p.cmd.param[6] = vha->node_name[5]; 1326 sns_cmd->p.cmd.param[7] = vha->node_name[4]; 1327 sns_cmd->p.cmd.param[8] = vha->node_name[3]; 1328 sns_cmd->p.cmd.param[9] = vha->node_name[2]; 1329 sns_cmd->p.cmd.param[10] = vha->node_name[1]; 1330 sns_cmd->p.cmd.param[11] = vha->node_name[0]; 1331 1332 /* Execute SNS command. */ 1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, 1334 sizeof(struct sns_cmd_pkt)); 1335 if (rval != QLA_SUCCESS) { 1336 /*EMPTY*/ 1337 ql_dbg(ql_dbg_disc, vha, 0x204a, 1338 "RNN_ID Send SNS failed (%d).\n", rval); 1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1340 sns_cmd->p.rnn_data[9] != 0x02) { 1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b, 1342 "RNN_ID failed, rejected request, rnn_rsp:\n"); 1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c, 1344 sns_cmd->p.rnn_data, 16); 1345 rval = QLA_FUNCTION_FAILED; 1346 } else { 1347 ql_dbg(ql_dbg_disc, vha, 0x204c, 1348 "RNN_ID exiting normally.\n"); 1349 } 1350 1351 return (rval); 1352 } 1353 1354 /** 1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service. 1356 * @vha: HA context 1357 * 1358 * Returns 0 on success. 1359 */ 1360 int 1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) 1362 { 1363 int ret, rval; 1364 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1365 struct qla_hw_data *ha = vha->hw; 1366 1367 ret = QLA_SUCCESS; 1368 if (vha->flags.management_server_logged_in) 1369 return ret; 1370 1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 1372 0xfa, mb, BIT_1); 1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 1374 if (rval == QLA_MEMORY_ALLOC_FAILED) 1375 ql_dbg(ql_dbg_disc, vha, 0x2085, 1376 "Failed management_server login: loopid=%x " 1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval); 1378 else 1379 ql_dbg(ql_dbg_disc, vha, 0x2024, 1380 "Failed management_server login: loopid=%x " 1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", 1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], 1383 mb[7]); 1384 ret = QLA_FUNCTION_FAILED; 1385 } else 1386 vha->flags.management_server_logged_in = 1; 1387 1388 return ret; 1389 } 1390 1391 /** 1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. 1393 * @vha: HA context 1394 * @req_size: request size in bytes 1395 * @rsp_size: response size in bytes 1396 * 1397 * Returns a pointer to the @ha's ms_iocb. 
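 *
 * NOTE: FDMI traffic is addressed to the management server
 * (vha->mgmt_svr_loop_id) rather than the simple name server, but the DMA
 * layout matches the SNS helpers above: both the request and the response
 * descriptors point at the shared ha->ct_sns buffer, so the CT response
 * overwrites the request in place once the exchange completes.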
 */
void *
qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
    uint32_t rsp_size)
{
	ms_iocb_entry_t *ms_pkt;
	struct qla_hw_data *ha = vha->hw;

	ms_pkt = ha->ms_iocb;
	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));

	ms_pkt->entry_type = MS_IOCB_TYPE;
	ms_pkt->entry_count = 1;
	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
	ms_pkt->total_dsd_count = cpu_to_le16(2);
	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
	ms_pkt->req_bytecount = cpu_to_le32(req_size);

	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;

	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;

	return ms_pkt;
}

/**
 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 * @vha: HA context
 * @req_size: request size in bytes
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the @ha's ms_iocb.
 */
void *
qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
    uint32_t rsp_size)
{
	struct ct_entry_24xx *ct_pkt;
	struct qla_hw_data *ha = vha->hw;

	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));

	ct_pkt->entry_type = CT_IOCB_TYPE;
	ct_pkt->entry_count = 1;
	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);

	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;

	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
	ct_pkt->vp_index = vha->vp_idx;

	return ct_pkt;
}

static void
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
{
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;

	if (IS_FWI2_CAPABLE(ha)) {
		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
	} else {
		ms_pkt->req_bytecount = cpu_to_le32(req_size);
		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
	}
}

/**
 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
 * @p: CT request buffer
 * @cmd: GS command
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the initialized @ct_req.
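 *
 * NOTE: this differs from qla2x00_prep_ct_req() only in its CT addressing:
 * GS_Type 0xFA with subtype 0x10 selects the FDMI portion of the management
 * service (the name-server helpers use 0xFC/0x02 for the directory
 * service), and max_rsp_size is again expressed in 4-byte words excluding
 * the 16-byte CT header, hence the (rsp_size - 16) / 4 conversion.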
1488 */ 1489 static inline struct ct_sns_req * 1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, 1491 uint16_t rsp_size) 1492 { 1493 memset(p, 0, sizeof(struct ct_sns_pkt)); 1494 1495 p->p.req.header.revision = 0x01; 1496 p->p.req.header.gs_type = 0xFA; 1497 p->p.req.header.gs_subtype = 0x10; 1498 p->p.req.command = cpu_to_be16(cmd); 1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 1500 1501 return &p->p.req; 1502 } 1503 1504 uint 1505 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) 1506 { 1507 uint speeds = 0; 1508 1509 if (IS_CNA_CAPABLE(ha)) 1510 return FDMI_PORT_SPEED_10GB; 1511 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { 1512 if (ha->max_supported_speed == 2) { 1513 if (ha->min_supported_speed <= 6) 1514 speeds |= FDMI_PORT_SPEED_64GB; 1515 } 1516 if (ha->max_supported_speed == 2 || 1517 ha->max_supported_speed == 1) { 1518 if (ha->min_supported_speed <= 5) 1519 speeds |= FDMI_PORT_SPEED_32GB; 1520 } 1521 if (ha->max_supported_speed == 2 || 1522 ha->max_supported_speed == 1 || 1523 ha->max_supported_speed == 0) { 1524 if (ha->min_supported_speed <= 4) 1525 speeds |= FDMI_PORT_SPEED_16GB; 1526 } 1527 if (ha->max_supported_speed == 1 || 1528 ha->max_supported_speed == 0) { 1529 if (ha->min_supported_speed <= 3) 1530 speeds |= FDMI_PORT_SPEED_8GB; 1531 } 1532 if (ha->max_supported_speed == 0) { 1533 if (ha->min_supported_speed <= 2) 1534 speeds |= FDMI_PORT_SPEED_4GB; 1535 } 1536 return speeds; 1537 } 1538 if (IS_QLA2031(ha)) { 1539 if ((ha->pdev->subsystem_vendor == 0x103C) && 1540 ((ha->pdev->subsystem_device == 0x8002) || 1541 (ha->pdev->subsystem_device == 0x8086))) { 1542 speeds = FDMI_PORT_SPEED_16GB; 1543 } else { 1544 speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| 1545 FDMI_PORT_SPEED_4GB; 1546 } 1547 return speeds; 1548 } 1549 if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) 1550 return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| 1551 FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; 1552 if (IS_QLA24XX_TYPE(ha)) 1553 return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB| 1554 FDMI_PORT_SPEED_1GB; 1555 if (IS_QLA23XX(ha)) 1556 return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; 1557 return FDMI_PORT_SPEED_1GB; 1558 } 1559 1560 uint 1561 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha) 1562 { 1563 switch (ha->link_data_rate) { 1564 case PORT_SPEED_1GB: 1565 return FDMI_PORT_SPEED_1GB; 1566 case PORT_SPEED_2GB: 1567 return FDMI_PORT_SPEED_2GB; 1568 case PORT_SPEED_4GB: 1569 return FDMI_PORT_SPEED_4GB; 1570 case PORT_SPEED_8GB: 1571 return FDMI_PORT_SPEED_8GB; 1572 case PORT_SPEED_10GB: 1573 return FDMI_PORT_SPEED_10GB; 1574 case PORT_SPEED_16GB: 1575 return FDMI_PORT_SPEED_16GB; 1576 case PORT_SPEED_32GB: 1577 return FDMI_PORT_SPEED_32GB; 1578 case PORT_SPEED_64GB: 1579 return FDMI_PORT_SPEED_64GB; 1580 default: 1581 return FDMI_PORT_SPEED_UNKNOWN; 1582 } 1583 } 1584 1585 /** 1586 * qla2x00_hba_attributes() - perform HBA attributes registration 1587 * @vha: HA context 1588 * @entries: number of entries to use 1589 * @callopt: Option to issue extended or standard FDMI 1590 * command parameter 1591 * 1592 * Returns 0 on success. 1593 */ 1594 static unsigned long 1595 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, 1596 unsigned int callopt) 1597 { 1598 struct qla_hw_data *ha = vha->hw; 1599 struct new_utsname *p_sysid = utsname(); 1600 struct ct_fdmi_hba_attr *eiter; 1601 uint16_t alen; 1602 unsigned long size = 0; 1603 1604 /* Nodename. 
*/ 1605 eiter = entries + size; 1606 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); 1607 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); 1608 alen = sizeof(eiter->a.node_name); 1609 alen += FDMI_ATTR_TYPELEN(eiter); 1610 eiter->len = cpu_to_be16(alen); 1611 size += alen; 1612 ql_dbg(ql_dbg_disc, vha, 0x20a0, 1613 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); 1614 /* Manufacturer. */ 1615 eiter = entries + size; 1616 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); 1617 alen = scnprintf( 1618 eiter->a.manufacturer, sizeof(eiter->a.manufacturer), 1619 "%s", QLA2XXX_MANUFACTURER); 1620 alen += FDMI_ATTR_ALIGNMENT(alen); 1621 alen += FDMI_ATTR_TYPELEN(eiter); 1622 eiter->len = cpu_to_be16(alen); 1623 size += alen; 1624 ql_dbg(ql_dbg_disc, vha, 0x20a1, 1625 "MANUFACTURER = %s.\n", eiter->a.manufacturer); 1626 /* Serial number. */ 1627 eiter = entries + size; 1628 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); 1629 alen = 0; 1630 if (IS_FWI2_CAPABLE(ha)) { 1631 alen = qla2xxx_get_vpd_field(vha, "SN", 1632 eiter->a.serial_num, sizeof(eiter->a.serial_num)); 1633 } 1634 if (!alen) { 1635 uint32_t sn = ((ha->serial0 & 0x1f) << 16) | 1636 (ha->serial2 << 8) | ha->serial1; 1637 alen = scnprintf( 1638 eiter->a.serial_num, sizeof(eiter->a.serial_num), 1639 "%c%05d", 'A' + sn / 100000, sn % 100000); 1640 } 1641 alen += FDMI_ATTR_ALIGNMENT(alen); 1642 alen += FDMI_ATTR_TYPELEN(eiter); 1643 eiter->len = cpu_to_be16(alen); 1644 size += alen; 1645 ql_dbg(ql_dbg_disc, vha, 0x20a2, 1646 "SERIAL NUMBER = %s.\n", eiter->a.serial_num); 1647 /* Model name. */ 1648 eiter = entries + size; 1649 eiter->type = cpu_to_be16(FDMI_HBA_MODEL); 1650 alen = scnprintf( 1651 eiter->a.model, sizeof(eiter->a.model), 1652 "%s", ha->model_number); 1653 alen += FDMI_ATTR_ALIGNMENT(alen); 1654 alen += FDMI_ATTR_TYPELEN(eiter); 1655 eiter->len = cpu_to_be16(alen); 1656 size += alen; 1657 ql_dbg(ql_dbg_disc, vha, 0x20a3, 1658 "MODEL NAME = %s.\n", eiter->a.model); 1659 /* Model description. */ 1660 eiter = entries + size; 1661 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); 1662 alen = scnprintf( 1663 eiter->a.model_desc, sizeof(eiter->a.model_desc), 1664 "%s", ha->model_desc); 1665 alen += FDMI_ATTR_ALIGNMENT(alen); 1666 alen += FDMI_ATTR_TYPELEN(eiter); 1667 eiter->len = cpu_to_be16(alen); 1668 size += alen; 1669 ql_dbg(ql_dbg_disc, vha, 0x20a4, 1670 "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc); 1671 /* Hardware version. */ 1672 eiter = entries + size; 1673 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); 1674 alen = 0; 1675 if (IS_FWI2_CAPABLE(ha)) { 1676 if (!alen) { 1677 alen = qla2xxx_get_vpd_field(vha, "MN", 1678 eiter->a.hw_version, sizeof(eiter->a.hw_version)); 1679 } 1680 if (!alen) { 1681 alen = qla2xxx_get_vpd_field(vha, "EC", 1682 eiter->a.hw_version, sizeof(eiter->a.hw_version)); 1683 } 1684 } 1685 if (!alen) { 1686 alen = scnprintf( 1687 eiter->a.hw_version, sizeof(eiter->a.hw_version), 1688 "HW:%s", ha->adapter_id); 1689 } 1690 alen += FDMI_ATTR_ALIGNMENT(alen); 1691 alen += FDMI_ATTR_TYPELEN(eiter); 1692 eiter->len = cpu_to_be16(alen); 1693 size += alen; 1694 ql_dbg(ql_dbg_disc, vha, 0x20a5, 1695 "HARDWARE VERSION = %s.\n", eiter->a.hw_version); 1696 /* Driver version. 
*/ 1697 eiter = entries + size; 1698 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); 1699 alen = scnprintf( 1700 eiter->a.driver_version, sizeof(eiter->a.driver_version), 1701 "%s", qla2x00_version_str); 1702 alen += FDMI_ATTR_ALIGNMENT(alen); 1703 alen += FDMI_ATTR_TYPELEN(eiter); 1704 eiter->len = cpu_to_be16(alen); 1705 size += alen; 1706 ql_dbg(ql_dbg_disc, vha, 0x20a6, 1707 "DRIVER VERSION = %s.\n", eiter->a.driver_version); 1708 /* Option ROM version. */ 1709 eiter = entries + size; 1710 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); 1711 alen = scnprintf( 1712 eiter->a.orom_version, sizeof(eiter->a.orom_version), 1713 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1714 alen += FDMI_ATTR_ALIGNMENT(alen); 1715 alen += FDMI_ATTR_TYPELEN(eiter); 1716 eiter->len = cpu_to_be16(alen); 1717 size += alen; 1718 1719 ql_dbg(ql_dbg_disc, vha, 0x20a7, 1720 "OPTROM VERSION = %d.%02d.\n", 1721 eiter->a.orom_version[1], eiter->a.orom_version[0]); 1722 /* Firmware version */ 1723 eiter = entries + size; 1724 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1725 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, 1726 sizeof(eiter->a.fw_version)); 1727 alen += FDMI_ATTR_ALIGNMENT(alen); 1728 alen += FDMI_ATTR_TYPELEN(eiter); 1729 eiter->len = cpu_to_be16(alen); 1730 size += alen; 1731 ql_dbg(ql_dbg_disc, vha, 0x20a8, 1732 "FIRMWARE VERSION = %s.\n", eiter->a.fw_version); 1733 /* OS Name and Version */ 1734 eiter = entries + size; 1735 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); 1736 alen = 0; 1737 if (p_sysid) { 1738 alen = scnprintf( 1739 eiter->a.os_version, sizeof(eiter->a.os_version), 1740 "%s %s %s", 1741 p_sysid->sysname, p_sysid->release, p_sysid->machine); 1742 } 1743 if (!alen) { 1744 alen = scnprintf( 1745 eiter->a.os_version, sizeof(eiter->a.os_version), 1746 "%s %s", 1747 "Linux", fc_host_system_hostname(vha->host)); 1748 } 1749 alen += FDMI_ATTR_ALIGNMENT(alen); 1750 alen += FDMI_ATTR_TYPELEN(eiter); 1751 eiter->len = cpu_to_be16(alen); 1752 size += alen; 1753 ql_dbg(ql_dbg_disc, vha, 0x20a9, 1754 "OS VERSION = %s.\n", eiter->a.os_version); 1755 if (callopt == CALLOPT_FDMI1) 1756 goto done; 1757 /* MAX CT Payload Length */ 1758 eiter = entries + size; 1759 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); 1760 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2); 1761 1762 alen = sizeof(eiter->a.max_ct_len); 1763 alen += FDMI_ATTR_TYPELEN(eiter); 1764 eiter->len = cpu_to_be16(alen); 1765 size += alen; 1766 ql_dbg(ql_dbg_disc, vha, 0x20aa, 1767 "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len)); 1768 /* Node Symbolic Name */ 1769 eiter = entries + size; 1770 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME); 1771 alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name, 1772 sizeof(eiter->a.sym_name)); 1773 alen += FDMI_ATTR_ALIGNMENT(alen); 1774 alen += FDMI_ATTR_TYPELEN(eiter); 1775 eiter->len = cpu_to_be16(alen); 1776 size += alen; 1777 ql_dbg(ql_dbg_disc, vha, 0x20ab, 1778 "SYMBOLIC NAME = %s.\n", eiter->a.sym_name); 1779 /* Vendor Specific information */ 1780 eiter = entries + size; 1781 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO); 1782 eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC); 1783 alen = sizeof(eiter->a.vendor_specific_info); 1784 alen += FDMI_ATTR_TYPELEN(eiter); 1785 eiter->len = cpu_to_be16(alen); 1786 size += alen; 1787 ql_dbg(ql_dbg_disc, vha, 0x20ac, 1788 "VENDOR SPECIFIC INFO = 0x%x.\n", 1789 be32_to_cpu(eiter->a.vendor_specific_info)); 1790 /* Num Ports 
*/ 1791 eiter = entries + size; 1792 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS); 1793 eiter->a.num_ports = cpu_to_be32(1); 1794 alen = sizeof(eiter->a.num_ports); 1795 alen += FDMI_ATTR_TYPELEN(eiter); 1796 eiter->len = cpu_to_be16(alen); 1797 size += alen; 1798 ql_dbg(ql_dbg_disc, vha, 0x20ad, 1799 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); 1800 /* Fabric Name */ 1801 eiter = entries + size; 1802 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME); 1803 memcpy(eiter->a.fabric_name, vha->fabric_node_name, 1804 sizeof(eiter->a.fabric_name)); 1805 alen = sizeof(eiter->a.fabric_name); 1806 alen += FDMI_ATTR_TYPELEN(eiter); 1807 eiter->len = cpu_to_be16(alen); 1808 size += alen; 1809 ql_dbg(ql_dbg_disc, vha, 0x20ae, 1810 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); 1811 /* BIOS Version */ 1812 eiter = entries + size; 1813 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME); 1814 alen = scnprintf( 1815 eiter->a.bios_name, sizeof(eiter->a.bios_name), 1816 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1817 alen += FDMI_ATTR_ALIGNMENT(alen); 1818 alen += FDMI_ATTR_TYPELEN(eiter); 1819 eiter->len = cpu_to_be16(alen); 1820 size += alen; 1821 ql_dbg(ql_dbg_disc, vha, 0x20af, 1822 "BIOS NAME = %s\n", eiter->a.bios_name); 1823 /* Vendor Identifier */ 1824 eiter = entries + size; 1825 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER); 1826 alen = scnprintf( 1827 eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier), 1828 "%s", "QLGC"); 1829 alen += FDMI_ATTR_ALIGNMENT(alen); 1830 alen += FDMI_ATTR_TYPELEN(eiter); 1831 eiter->len = cpu_to_be16(alen); 1832 size += alen; 1833 ql_dbg(ql_dbg_disc, vha, 0x20b0, 1834 "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier); 1835 done: 1836 return size; 1837 } 1838 1839 /** 1840 * qla2x00_port_attributes() - perform Port attributes registration 1841 * @vha: HA context 1842 * @entries: number of entries to use 1843 * @callopt: Option to issue extended or standard FDMI 1844 * command parameter 1845 * 1846 * Returns 0 on success. 1847 */ 1848 static unsigned long 1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, 1850 unsigned int callopt) 1851 { 1852 struct qla_hw_data *ha = vha->hw; 1853 struct new_utsname *p_sysid = utsname(); 1854 char *hostname = p_sysid ? 1855 p_sysid->nodename : fc_host_system_hostname(vha->host); 1856 struct ct_fdmi_port_attr *eiter; 1857 uint16_t alen; 1858 unsigned long size = 0; 1859 1860 /* FC4 types. */ 1861 eiter = entries + size; 1862 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); 1863 eiter->a.fc4_types[0] = 0x00; 1864 eiter->a.fc4_types[1] = 0x00; 1865 eiter->a.fc4_types[2] = 0x01; 1866 eiter->a.fc4_types[3] = 0x00; 1867 alen = sizeof(eiter->a.fc4_types); 1868 alen += FDMI_ATTR_TYPELEN(eiter); 1869 eiter->len = cpu_to_be16(alen); 1870 size += alen; 1871 ql_dbg(ql_dbg_disc, vha, 0x20c0, 1872 "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types); 1873 if (vha->flags.nvme_enabled) { 1874 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ 1875 ql_dbg(ql_dbg_disc, vha, 0x211f, 1876 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", 1877 eiter->a.fc4_types[6]); 1878 } 1879 /* Supported speed. 
*/ 1880 eiter = entries + size; 1881 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1882 eiter->a.sup_speed = cpu_to_be32( 1883 qla25xx_fdmi_port_speed_capability(ha)); 1884 alen = sizeof(eiter->a.sup_speed); 1885 alen += FDMI_ATTR_TYPELEN(eiter); 1886 eiter->len = cpu_to_be16(alen); 1887 size += alen; 1888 ql_dbg(ql_dbg_disc, vha, 0x20c1, 1889 "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed)); 1890 /* Current speed. */ 1891 eiter = entries + size; 1892 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); 1893 eiter->a.cur_speed = cpu_to_be32( 1894 qla25xx_fdmi_port_speed_currently(ha)); 1895 alen = sizeof(eiter->a.cur_speed); 1896 alen += FDMI_ATTR_TYPELEN(eiter); 1897 eiter->len = cpu_to_be16(alen); 1898 size += alen; 1899 ql_dbg(ql_dbg_disc, vha, 0x20c2, 1900 "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed)); 1901 /* Max frame size. */ 1902 eiter = entries + size; 1903 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1904 eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size); 1905 alen = sizeof(eiter->a.max_frame_size); 1906 alen += FDMI_ATTR_TYPELEN(eiter); 1907 eiter->len = cpu_to_be16(alen); 1908 size += alen; 1909 ql_dbg(ql_dbg_disc, vha, 0x20c3, 1910 "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size)); 1911 /* OS device name. */ 1912 eiter = entries + size; 1913 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); 1914 alen = scnprintf( 1915 eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), 1916 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 1917 alen += FDMI_ATTR_ALIGNMENT(alen); 1918 alen += FDMI_ATTR_TYPELEN(eiter); 1919 eiter->len = cpu_to_be16(alen); 1920 size += alen; 1921 ql_dbg(ql_dbg_disc, vha, 0x20c4, 1922 "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name); 1923 /* Hostname. */ 1924 eiter = entries + size; 1925 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); 1926 if (!*hostname || !strncmp(hostname, "(none)", 6)) 1927 hostname = "Linux-default"; 1928 alen = scnprintf( 1929 eiter->a.host_name, sizeof(eiter->a.host_name), 1930 "%s", hostname); 1931 alen += FDMI_ATTR_ALIGNMENT(alen); 1932 alen += FDMI_ATTR_TYPELEN(eiter); 1933 eiter->len = cpu_to_be16(alen); 1934 size += alen; 1935 ql_dbg(ql_dbg_disc, vha, 0x20c5, 1936 "HOSTNAME = %s.\n", eiter->a.host_name); 1937 1938 if (callopt == CALLOPT_FDMI1) 1939 goto done; 1940 1941 /* Node Name */ 1942 eiter = entries + size; 1943 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); 1944 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); 1945 alen = sizeof(eiter->a.node_name); 1946 alen += FDMI_ATTR_TYPELEN(eiter); 1947 eiter->len = cpu_to_be16(alen); 1948 size += alen; 1949 ql_dbg(ql_dbg_disc, vha, 0x20c6, 1950 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); 1951 1952 /* Port Name */ 1953 eiter = entries + size; 1954 eiter->type = cpu_to_be16(FDMI_PORT_NAME); 1955 memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name)); 1956 alen = sizeof(eiter->a.port_name); 1957 alen += FDMI_ATTR_TYPELEN(eiter); 1958 eiter->len = cpu_to_be16(alen); 1959 size += alen; 1960 ql_dbg(ql_dbg_disc, vha, 0x20c7, 1961 "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); 1962 1963 /* Port Symbolic Name */ 1964 eiter = entries + size; 1965 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); 1966 alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, 1967 sizeof(eiter->a.port_sym_name)); 1968 alen += FDMI_ATTR_ALIGNMENT(alen); 1969 alen += FDMI_ATTR_TYPELEN(eiter); 1970 eiter->len = cpu_to_be16(alen); 1971 size += alen; 1972 ql_dbg(ql_dbg_disc, vha, 0x20c8, 1973 
"PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); 1974 1975 /* Port Type */ 1976 eiter = entries + size; 1977 eiter->type = cpu_to_be16(FDMI_PORT_TYPE); 1978 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); 1979 alen = sizeof(eiter->a.port_type); 1980 alen += FDMI_ATTR_TYPELEN(eiter); 1981 eiter->len = cpu_to_be16(alen); 1982 size += alen; 1983 ql_dbg(ql_dbg_disc, vha, 0x20c9, 1984 "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); 1985 1986 /* Supported Class of Service */ 1987 eiter = entries + size; 1988 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); 1989 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); 1990 alen = sizeof(eiter->a.port_supported_cos); 1991 alen += FDMI_ATTR_TYPELEN(eiter); 1992 eiter->len = cpu_to_be16(alen); 1993 size += alen; 1994 ql_dbg(ql_dbg_disc, vha, 0x20ca, 1995 "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); 1996 1997 /* Port Fabric Name */ 1998 eiter = entries + size; 1999 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); 2000 memcpy(eiter->a.fabric_name, vha->fabric_node_name, 2001 sizeof(eiter->a.fabric_name)); 2002 alen = sizeof(eiter->a.fabric_name); 2003 alen += FDMI_ATTR_TYPELEN(eiter); 2004 eiter->len = cpu_to_be16(alen); 2005 size += alen; 2006 ql_dbg(ql_dbg_disc, vha, 0x20cb, 2007 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); 2008 2009 /* FC4_type */ 2010 eiter = entries + size; 2011 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); 2012 eiter->a.port_fc4_type[0] = 0x00; 2013 eiter->a.port_fc4_type[1] = 0x00; 2014 eiter->a.port_fc4_type[2] = 0x01; 2015 eiter->a.port_fc4_type[3] = 0x00; 2016 alen = sizeof(eiter->a.port_fc4_type); 2017 alen += FDMI_ATTR_TYPELEN(eiter); 2018 eiter->len = cpu_to_be16(alen); 2019 size += alen; 2020 ql_dbg(ql_dbg_disc, vha, 0x20cc, 2021 "PORT ACTIVE FC4 TYPE = %016llx.\n", 2022 *(uint64_t *)eiter->a.port_fc4_type); 2023 2024 /* Port State */ 2025 eiter = entries + size; 2026 eiter->type = cpu_to_be16(FDMI_PORT_STATE); 2027 eiter->a.port_state = cpu_to_be32(2); 2028 alen = sizeof(eiter->a.port_state); 2029 alen += FDMI_ATTR_TYPELEN(eiter); 2030 eiter->len = cpu_to_be16(alen); 2031 size += alen; 2032 ql_dbg(ql_dbg_disc, vha, 0x20cd, 2033 "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); 2034 2035 /* Number of Ports */ 2036 eiter = entries + size; 2037 eiter->type = cpu_to_be16(FDMI_PORT_COUNT); 2038 eiter->a.num_ports = cpu_to_be32(1); 2039 alen = sizeof(eiter->a.num_ports); 2040 alen += FDMI_ATTR_TYPELEN(eiter); 2041 eiter->len = cpu_to_be16(alen); 2042 size += alen; 2043 ql_dbg(ql_dbg_disc, vha, 0x20ce, 2044 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); 2045 2046 /* Port Identifier */ 2047 eiter = entries + size; 2048 eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); 2049 eiter->a.port_id = cpu_to_be32(vha->d_id.b24); 2050 alen = sizeof(eiter->a.port_id); 2051 alen += FDMI_ATTR_TYPELEN(eiter); 2052 eiter->len = cpu_to_be16(alen); 2053 size += alen; 2054 ql_dbg(ql_dbg_disc, vha, 0x20cf, 2055 "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); 2056 2057 if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) 2058 goto done; 2059 2060 /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ 2061 eiter = entries + size; 2062 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); 2063 alen = scnprintf( 2064 eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service), 2065 "%s", "Smart SAN Initiator"); 2066 alen += FDMI_ATTR_ALIGNMENT(alen); 2067 alen += FDMI_ATTR_TYPELEN(eiter); 2068 eiter->len = cpu_to_be16(alen); 2069 size += alen; 2070 ql_dbg(ql_dbg_disc, vha, 0x20d0, 
2071 "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); 2072 2073 /* Smart SAN GUID (NWWN+PWWN) */ 2074 eiter = entries + size; 2075 eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); 2076 memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE); 2077 memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE); 2078 alen = sizeof(eiter->a.smartsan_guid); 2079 alen += FDMI_ATTR_TYPELEN(eiter); 2080 eiter->len = cpu_to_be16(alen); 2081 size += alen; 2082 ql_dbg(ql_dbg_disc, vha, 0x20d1, 2083 "Smart SAN GUID = %016llx-%016llx\n", 2084 wwn_to_u64(eiter->a.smartsan_guid), 2085 wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); 2086 2087 /* Smart SAN Version (populate "Smart SAN Version 1.0") */ 2088 eiter = entries + size; 2089 eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); 2090 alen = scnprintf( 2091 eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version), 2092 "%s", "Smart SAN Version 2.0"); 2093 alen += FDMI_ATTR_ALIGNMENT(alen); 2094 alen += FDMI_ATTR_TYPELEN(eiter); 2095 eiter->len = cpu_to_be16(alen); 2096 size += alen; 2097 ql_dbg(ql_dbg_disc, vha, 0x20d2, 2098 "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); 2099 2100 /* Smart SAN Product Name (Specify Adapter Model No) */ 2101 eiter = entries + size; 2102 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); 2103 alen = scnprintf(eiter->a.smartsan_prod_name, 2104 sizeof(eiter->a.smartsan_prod_name), 2105 "ISP%04x", ha->pdev->device); 2106 alen += FDMI_ATTR_ALIGNMENT(alen); 2107 alen += FDMI_ATTR_TYPELEN(eiter); 2108 eiter->len = cpu_to_be16(alen); 2109 size += alen; 2110 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2111 "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); 2112 2113 /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ 2114 eiter = entries + size; 2115 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); 2116 eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1); 2117 alen = sizeof(eiter->a.smartsan_port_info); 2118 alen += FDMI_ATTR_TYPELEN(eiter); 2119 eiter->len = cpu_to_be16(alen); 2120 size += alen; 2121 ql_dbg(ql_dbg_disc, vha, 0x20d4, 2122 "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); 2123 2124 /* Smart SAN Security Support */ 2125 eiter = entries + size; 2126 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); 2127 eiter->a.smartsan_security_support = cpu_to_be32(1); 2128 alen = sizeof(eiter->a.smartsan_security_support); 2129 alen += FDMI_ATTR_TYPELEN(eiter); 2130 eiter->len = cpu_to_be16(alen); 2131 size += alen; 2132 ql_dbg(ql_dbg_disc, vha, 0x20d6, 2133 "SMARTSAN SECURITY SUPPORT = %d\n", 2134 be32_to_cpu(eiter->a.smartsan_security_support)); 2135 2136 done: 2137 return size; 2138 } 2139 2140 /** 2141 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration 2142 * @vha: HA context 2143 * @callopt: Option to issue FDMI registration 2144 * 2145 * Returns 0 on success. 2146 */ 2147 static int 2148 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) 2149 { 2150 struct qla_hw_data *ha = vha->hw; 2151 unsigned long size = 0; 2152 unsigned int rval, count; 2153 ms_iocb_entry_t *ms_pkt; 2154 struct ct_sns_req *ct_req; 2155 struct ct_sns_rsp *ct_rsp; 2156 void *entries; 2157 2158 count = callopt != CALLOPT_FDMI1 ? 
2159 FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; 2160 2161 size = RHBA_RSP_SIZE; 2162 2163 ql_dbg(ql_dbg_disc, vha, 0x20e0, 2164 "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2165 2166 /* Request size adjusted after CT preparation */ 2167 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2168 2169 /* Prepare CT request */ 2170 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size); 2171 ct_rsp = &ha->ct_sns->p.rsp; 2172 2173 /* Prepare FDMI command entries */ 2174 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, 2175 sizeof(ct_req->req.rhba.hba_identifier)); 2176 size += sizeof(ct_req->req.rhba.hba_identifier); 2177 2178 ct_req->req.rhba.entry_count = cpu_to_be32(1); 2179 size += sizeof(ct_req->req.rhba.entry_count); 2180 2181 memcpy(ct_req->req.rhba.port_name, vha->port_name, 2182 sizeof(ct_req->req.rhba.port_name)); 2183 size += sizeof(ct_req->req.rhba.port_name); 2184 2185 /* Attribute count */ 2186 ct_req->req.rhba.attrs.count = cpu_to_be32(count); 2187 size += sizeof(ct_req->req.rhba.attrs.count); 2188 2189 /* Attribute block */ 2190 entries = &ct_req->req.rhba.attrs.entry; 2191 2192 size += qla2x00_hba_attributes(vha, entries, callopt); 2193 2194 /* Update MS request size. */ 2195 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2196 2197 ql_dbg(ql_dbg_disc, vha, 0x20e1, 2198 "RHBA %016llx %016llx.\n", 2199 wwn_to_u64(ct_req->req.rhba.hba_identifier), 2200 wwn_to_u64(ct_req->req.rhba.port_name)); 2201 2202 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2, 2203 entries, size); 2204 2205 /* Execute MS IOCB */ 2206 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2207 sizeof(*ha->ms_iocb)); 2208 if (rval) { 2209 ql_dbg(ql_dbg_disc, vha, 0x20e3, 2210 "RHBA iocb failed (%d).\n", rval); 2211 return rval; 2212 } 2213 2214 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); 2215 if (rval) { 2216 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2217 ct_rsp->header.explanation_code == 2218 CT_EXPL_ALREADY_REGISTERED) { 2219 ql_dbg(ql_dbg_disc, vha, 0x20e4, 2220 "RHBA already registered.\n"); 2221 return QLA_ALREADY_REGISTERED; 2222 } 2223 2224 ql_dbg(ql_dbg_disc, vha, 0x20e5, 2225 "RHBA failed, CT Reason %#x, CT Explanation %#x\n", 2226 ct_rsp->header.reason_code, 2227 ct_rsp->header.explanation_code); 2228 return rval; 2229 } 2230 2231 ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n"); 2232 return rval; 2233 } 2234 2235 2236 static int 2237 qla2x00_fdmi_dhba(scsi_qla_host_t *vha) 2238 { 2239 int rval; 2240 struct qla_hw_data *ha = vha->hw; 2241 ms_iocb_entry_t *ms_pkt; 2242 struct ct_sns_req *ct_req; 2243 struct ct_sns_rsp *ct_rsp; 2244 /* Issue RPA */ 2245 /* Prepare common MS IOCB */ 2246 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE, 2247 DHBA_RSP_SIZE); 2248 /* Prepare CT request */ 2249 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); 2250 ct_rsp = &ha->ct_sns->p.rsp; 2251 /* Prepare FDMI command arguments -- portname. 
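	 * (DHBA de-registers this HBA, and the port attributes under it,
	 * from the fabric's FDMI database so a subsequent RHBA starts clean.)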
*/ 2252 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 2253 ql_dbg(ql_dbg_disc, vha, 0x2036, 2254 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); 2255 /* Execute MS IOCB */ 2256 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2257 sizeof(ms_iocb_entry_t)); 2258 if (rval != QLA_SUCCESS) { 2259 /*EMPTY*/ 2260 ql_dbg(ql_dbg_disc, vha, 0x2037, 2261 "DHBA issue IOCB failed (%d).\n", rval); 2262 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 2263 QLA_SUCCESS) { 2264 rval = QLA_FUNCTION_FAILED; 2265 } else { 2266 ql_dbg(ql_dbg_disc, vha, 0x2038, 2267 "DHBA exiting normally.\n"); 2268 } 2269 return rval; 2270 } 2271 2272 /** 2273 * qla2x00_fdmi_rprt() - perform RPRT registration 2274 * @vha: HA context 2275 * @callopt: Option to issue extended or standard FDMI 2276 * command parameter 2277 * 2278 * Returns 0 on success. 2279 */ 2280 static int 2281 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) 2282 { 2283 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 2284 struct qla_hw_data *ha = vha->hw; 2285 ulong size = 0; 2286 uint rval, count; 2287 ms_iocb_entry_t *ms_pkt; 2288 struct ct_sns_req *ct_req; 2289 struct ct_sns_rsp *ct_rsp; 2290 void *entries; 2291 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? 2292 FDMI2_SMARTSAN_PORT_ATTR_COUNT : 2293 callopt != CALLOPT_FDMI1 ? 2294 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; 2295 2296 size = RPRT_RSP_SIZE; 2297 ql_dbg(ql_dbg_disc, vha, 0x20e8, 2298 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size); 2299 /* Request size adjusted after CT preparation */ 2300 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); 2301 /* Prepare CT request */ 2302 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size); 2303 ct_rsp = &ha->ct_sns->p.rsp; 2304 /* Prepare FDMI command entries */ 2305 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name, 2306 sizeof(ct_req->req.rprt.hba_identifier)); 2307 size += sizeof(ct_req->req.rprt.hba_identifier); 2308 memcpy(ct_req->req.rprt.port_name, vha->port_name, 2309 sizeof(ct_req->req.rprt.port_name)); 2310 size += sizeof(ct_req->req.rprt.port_name); 2311 /* Attribute count */ 2312 ct_req->req.rprt.attrs.count = cpu_to_be32(count); 2313 size += sizeof(ct_req->req.rprt.attrs.count); 2314 /* Attribute block */ 2315 entries = ct_req->req.rprt.attrs.entry; 2316 size += qla2x00_port_attributes(vha, entries, callopt); 2317 /* Update MS request size. 
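	 * (size + 16 covers the 16-byte CT_IU preamble that precedes the
	 * FDMI payload accumulated above.)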
	 */
	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
	ql_dbg(ql_dbg_disc, vha, 0x20e9,
	    "RPRT %016llx %016llx.\n",
	    wwn_to_u64(ct_req->req.rprt.hba_identifier),
	    wwn_to_u64(ct_req->req.rprt.port_name));
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
	    entries, size);
	/* Execute MS IOCB */
	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
	    sizeof(*ha->ms_iocb));
	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x20eb,
		    "RPRT iocb failed (%d).\n", rval);
		return rval;
	}
	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
	if (rval) {
		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
		    ct_rsp->header.explanation_code ==
		    CT_EXPL_ALREADY_REGISTERED) {
			ql_dbg(ql_dbg_disc, vha, 0x20ec,
			    "RPRT already registered.\n");
			return QLA_ALREADY_REGISTERED;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
		    ct_rsp->header.reason_code,
		    ct_rsp->header.explanation_code);
		return rval;
	}
	ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
	return rval;
}

/**
 * qla2x00_fdmi_rpa() - perform RPA registration
 * @vha: HA context
 * @callopt: Option to issue FDMI registration
 *
 * Returns 0 on success.
 */
static int
qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
{
	struct qla_hw_data *ha = vha->hw;
	ulong size = 0;
	uint rval, count;
	ms_iocb_entry_t *ms_pkt;
	struct ct_sns_req *ct_req;
	struct ct_sns_rsp *ct_rsp;
	void *entries;

	count =
	    callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
		FDMI2_SMARTSAN_PORT_ATTR_COUNT :
	    callopt != CALLOPT_FDMI1 ?
		FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;

	size =
	    callopt != CALLOPT_FDMI1 ?
		SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;

	ql_dbg(ql_dbg_disc, vha, 0x20f0,
	    "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);

	/* Request size adjusted after CT preparation */
	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);

	/* Prepare CT request */
	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
	ct_rsp = &ha->ct_sns->p.rsp;

	/* Prepare FDMI command entries. */
	memcpy(ct_req->req.rpa.port_name, vha->port_name,
	    sizeof(ct_req->req.rpa.port_name));
	size += sizeof(ct_req->req.rpa.port_name);

	/* Attribute count */
	ct_req->req.rpa.attrs.count = cpu_to_be32(count);
	size += sizeof(ct_req->req.rpa.attrs.count);

	/* Attribute block */
	entries = ct_req->req.rpa.attrs.entry;

	size += qla2x00_port_attributes(vha, entries, callopt);

	/* Update MS request size.
*/ 2406 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2407 2408 ql_dbg(ql_dbg_disc, vha, 0x20f1, 2409 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name)); 2410 2411 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2, 2412 entries, size); 2413 2414 /* Execute MS IOCB */ 2415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2416 sizeof(*ha->ms_iocb)); 2417 if (rval) { 2418 ql_dbg(ql_dbg_disc, vha, 0x20f3, 2419 "RPA iocb failed (%d).\n", rval); 2420 return rval; 2421 } 2422 2423 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); 2424 if (rval) { 2425 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 2426 ct_rsp->header.explanation_code == 2427 CT_EXPL_ALREADY_REGISTERED) { 2428 ql_dbg(ql_dbg_disc, vha, 0x20f4, 2429 "RPA already registered.\n"); 2430 return QLA_ALREADY_REGISTERED; 2431 } 2432 2433 ql_dbg(ql_dbg_disc, vha, 0x20f5, 2434 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n", 2435 ct_rsp->header.reason_code, 2436 ct_rsp->header.explanation_code); 2437 return rval; 2438 } 2439 2440 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n"); 2441 return rval; 2442 } 2443 2444 /** 2445 * qla2x00_fdmi_register() - 2446 * @vha: HA context 2447 * 2448 * Returns 0 on success. 2449 */ 2450 int 2451 qla2x00_fdmi_register(scsi_qla_host_t *vha) 2452 { 2453 int rval = QLA_SUCCESS; 2454 struct qla_hw_data *ha = vha->hw; 2455 2456 if (IS_QLA2100(ha) || IS_QLA2200(ha) || 2457 IS_QLAFX00(ha)) 2458 return rval; 2459 2460 rval = qla2x00_mgmt_svr_login(vha); 2461 if (rval) 2462 return rval; 2463 2464 /* For npiv/vport send rprt only */ 2465 if (vha->vp_idx) { 2466 if (ql2xsmartsan) 2467 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN); 2468 if (rval || !ql2xsmartsan) 2469 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2); 2470 if (rval) 2471 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1); 2472 2473 return rval; 2474 } 2475 2476 /* Try fdmi2 first, if fails then try fdmi1 */ 2477 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2478 if (rval) { 2479 if (rval != QLA_ALREADY_REGISTERED) 2480 goto try_fdmi; 2481 2482 rval = qla2x00_fdmi_dhba(vha); 2483 if (rval) 2484 goto try_fdmi; 2485 2486 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); 2487 if (rval) 2488 goto try_fdmi; 2489 } 2490 2491 if (ql2xsmartsan) 2492 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN); 2493 if (rval || !ql2xsmartsan) 2494 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2); 2495 if (rval) 2496 goto try_fdmi; 2497 2498 return rval; 2499 2500 try_fdmi: 2501 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2502 if (rval) { 2503 if (rval != QLA_ALREADY_REGISTERED) 2504 return rval; 2505 2506 rval = qla2x00_fdmi_dhba(vha); 2507 if (rval) 2508 return rval; 2509 2510 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); 2511 if (rval) 2512 return rval; 2513 } 2514 2515 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1); 2516 2517 return rval; 2518 } 2519 2520 /** 2521 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query. 2522 * @vha: HA context 2523 * @list: switch info entries to populate 2524 * 2525 * Returns 0 on success. 
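 * The fabric-assigned port name returned for each N_Port ID is cached in
 * @list and later used for the GPSC/iIDMA queries.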
2526 */ 2527 int 2528 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) 2529 { 2530 int rval = QLA_SUCCESS; 2531 uint16_t i; 2532 struct qla_hw_data *ha = vha->hw; 2533 ms_iocb_entry_t *ms_pkt; 2534 struct ct_sns_req *ct_req; 2535 struct ct_sns_rsp *ct_rsp; 2536 struct ct_arg arg; 2537 2538 if (!IS_IIDMA_CAPABLE(ha)) 2539 return QLA_FUNCTION_FAILED; 2540 2541 arg.iocb = ha->ms_iocb; 2542 arg.req_dma = ha->ct_sns_dma; 2543 arg.rsp_dma = ha->ct_sns_dma; 2544 arg.req_size = GFPN_ID_REQ_SIZE; 2545 arg.rsp_size = GFPN_ID_RSP_SIZE; 2546 arg.nport_handle = NPH_SNS; 2547 2548 for (i = 0; i < ha->max_fibre_devices; i++) { 2549 /* Issue GFPN_ID */ 2550 /* Prepare common MS IOCB */ 2551 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2552 2553 /* Prepare CT request */ 2554 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, 2555 GFPN_ID_RSP_SIZE); 2556 ct_rsp = &ha->ct_sns->p.rsp; 2557 2558 /* Prepare CT arguments -- port_id */ 2559 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2560 2561 /* Execute MS IOCB */ 2562 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2563 sizeof(ms_iocb_entry_t)); 2564 if (rval != QLA_SUCCESS) { 2565 /*EMPTY*/ 2566 ql_dbg(ql_dbg_disc, vha, 0x2023, 2567 "GFPN_ID issue IOCB failed (%d).\n", rval); 2568 break; 2569 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2570 "GFPN_ID") != QLA_SUCCESS) { 2571 rval = QLA_FUNCTION_FAILED; 2572 break; 2573 } else { 2574 /* Save fabric portname */ 2575 memcpy(list[i].fabric_port_name, 2576 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE); 2577 } 2578 2579 /* Last device exit. */ 2580 if (list[i].d_id.b.rsvd_1 != 0) 2581 break; 2582 } 2583 2584 return (rval); 2585 } 2586 2587 2588 static inline struct ct_sns_req * 2589 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, 2590 uint16_t rsp_size) 2591 { 2592 memset(p, 0, sizeof(struct ct_sns_pkt)); 2593 2594 p->p.req.header.revision = 0x01; 2595 p->p.req.header.gs_type = 0xFA; 2596 p->p.req.header.gs_subtype = 0x01; 2597 p->p.req.command = cpu_to_be16(cmd); 2598 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); 2599 2600 return &p->p.req; 2601 } 2602 2603 static uint16_t 2604 qla2x00_port_speed_capability(uint16_t speed) 2605 { 2606 switch (speed) { 2607 case BIT_15: 2608 return PORT_SPEED_1GB; 2609 case BIT_14: 2610 return PORT_SPEED_2GB; 2611 case BIT_13: 2612 return PORT_SPEED_4GB; 2613 case BIT_12: 2614 return PORT_SPEED_10GB; 2615 case BIT_11: 2616 return PORT_SPEED_8GB; 2617 case BIT_10: 2618 return PORT_SPEED_16GB; 2619 case BIT_8: 2620 return PORT_SPEED_32GB; 2621 case BIT_7: 2622 return PORT_SPEED_64GB; 2623 default: 2624 return PORT_SPEED_UNKNOWN; 2625 } 2626 } 2627 2628 /** 2629 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. 2630 * @vha: HA context 2631 * @list: switch info entries to populate 2632 * 2633 * Returns 0 on success. 
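 * GPSC is a Fabric Management Service query, so it is sent to the fabric
 * management server rather than the directory (name) server.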
 */
int
qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
{
	int rval;
	uint16_t i;
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt;
	struct ct_sns_req *ct_req;
	struct ct_sns_rsp *ct_rsp;
	struct ct_arg arg;

	if (!IS_IIDMA_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;
	if (!ha->flags.gpsc_supported)
		return QLA_FUNCTION_FAILED;

	rval = qla2x00_mgmt_svr_login(vha);
	if (rval)
		return rval;

	arg.iocb = ha->ms_iocb;
	arg.req_dma = ha->ct_sns_dma;
	arg.rsp_dma = ha->ct_sns_dma;
	arg.req_size = GPSC_REQ_SIZE;
	arg.rsp_size = GPSC_RSP_SIZE;
	arg.nport_handle = vha->mgmt_svr_loop_id;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		/* Issue GPSC */
		/* Prepare common MS IOCB */
		ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);

		/* Prepare CT request */
		ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
		    GPSC_RSP_SIZE);
		ct_rsp = &ha->ct_sns->p.rsp;

		/* Prepare CT arguments -- port_name */
		memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
		    WWN_SIZE);

		/* Execute MS IOCB */
		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
		    sizeof(ms_iocb_entry_t));
		if (rval != QLA_SUCCESS) {
			/*EMPTY*/
			ql_dbg(ql_dbg_disc, vha, 0x2059,
			    "GPSC issue IOCB failed (%d).\n", rval);
		} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
		    "GPSC")) != QLA_SUCCESS) {
			/* FM command unsupported? */
			if (rval == QLA_INVALID_COMMAND &&
			    (ct_rsp->header.reason_code ==
				CT_REASON_INVALID_COMMAND_CODE ||
			     ct_rsp->header.reason_code ==
				CT_REASON_COMMAND_UNSUPPORTED)) {
				ql_dbg(ql_dbg_disc, vha, 0x205a,
				    "GPSC command unsupported, disabling "
				    "query.\n");
				ha->flags.gpsc_supported = 0;
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			rval = QLA_FUNCTION_FAILED;
		} else {
			list[i].fp_speed = qla2x00_port_speed_capability(
			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
			ql_dbg(ql_dbg_disc, vha, 0x205b,
			    "GPSC ext entry - fpn "
			    "%8phN speeds=%04x speed=%04x.\n",
			    list[i].fabric_port_name,
			    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
		}

		/* Last device exit. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}

	return (rval);
}

/**
 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
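 * (GFF_ID reports the registered FC-4 Features nibble of each port; the
 * driver uses it to distinguish FCP-SCSI from FC-NVMe capable devices.)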
2720 * 2721 * @vha: HA context 2722 * @list: switch info entries to populate 2723 * 2724 */ 2725 void 2726 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) 2727 { 2728 int rval; 2729 uint16_t i; 2730 2731 ms_iocb_entry_t *ms_pkt; 2732 struct ct_sns_req *ct_req; 2733 struct ct_sns_rsp *ct_rsp; 2734 struct qla_hw_data *ha = vha->hw; 2735 uint8_t fcp_scsi_features = 0, nvme_features = 0; 2736 struct ct_arg arg; 2737 2738 for (i = 0; i < ha->max_fibre_devices; i++) { 2739 /* Set default FC4 Type as UNKNOWN so the default is to 2740 * Process this port */ 2741 list[i].fc4_type = 0; 2742 2743 /* Do not attempt GFF_ID if we are not FWI_2 capable */ 2744 if (!IS_FWI2_CAPABLE(ha)) 2745 continue; 2746 2747 arg.iocb = ha->ms_iocb; 2748 arg.req_dma = ha->ct_sns_dma; 2749 arg.rsp_dma = ha->ct_sns_dma; 2750 arg.req_size = GFF_ID_REQ_SIZE; 2751 arg.rsp_size = GFF_ID_RSP_SIZE; 2752 arg.nport_handle = NPH_SNS; 2753 2754 /* Prepare common MS IOCB */ 2755 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 2756 2757 /* Prepare CT request */ 2758 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, 2759 GFF_ID_RSP_SIZE); 2760 ct_rsp = &ha->ct_sns->p.rsp; 2761 2762 /* Prepare CT arguments -- port_id */ 2763 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); 2764 2765 /* Execute MS IOCB */ 2766 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2767 sizeof(ms_iocb_entry_t)); 2768 2769 if (rval != QLA_SUCCESS) { 2770 ql_dbg(ql_dbg_disc, vha, 0x205c, 2771 "GFF_ID issue IOCB failed (%d).\n", rval); 2772 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 2773 "GFF_ID") != QLA_SUCCESS) { 2774 ql_dbg(ql_dbg_disc, vha, 0x205d, 2775 "GFF_ID IOCB status had a failure status code.\n"); 2776 } else { 2777 fcp_scsi_features = 2778 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 2779 fcp_scsi_features &= 0x0f; 2780 2781 if (fcp_scsi_features) { 2782 list[i].fc4_type = FS_FC4TYPE_FCP; 2783 list[i].fc4_features = fcp_scsi_features; 2784 } 2785 2786 nvme_features = 2787 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 2788 nvme_features &= 0xf; 2789 2790 if (nvme_features) { 2791 list[i].fc4_type |= FS_FC4TYPE_NVME; 2792 list[i].fc4_features = nvme_features; 2793 } 2794 } 2795 2796 /* Last device exit. */ 2797 if (list[i].d_id.b.rsvd_1 != 0) 2798 break; 2799 } 2800 } 2801 2802 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) 2803 { 2804 struct qla_work_evt *e; 2805 2806 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); 2807 if (!e) 2808 return QLA_FUNCTION_FAILED; 2809 2810 e->u.fcport.fcport = fcport; 2811 return qla2x00_post_work(vha, e); 2812 } 2813 2814 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) 2815 { 2816 struct fc_port *fcport = ea->fcport; 2817 2818 ql_dbg(ql_dbg_disc, vha, 0x20d8, 2819 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", 2820 __func__, fcport->port_name, fcport->disc_state, 2821 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, 2822 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); 2823 2824 if (fcport->disc_state == DSC_DELETE_PEND) 2825 return; 2826 2827 /* We will figure-out what happen after AUTH completes */ 2828 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) 2829 return; 2830 2831 if (ea->sp->gen2 != fcport->login_gen) { 2832 /* target side must have changed it. 
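		 * (a newer login generation means another login completed
		 * while this GPSC was in flight, so the result is stale)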
*/ 2833 ql_dbg(ql_dbg_disc, vha, 0x20d3, 2834 "%s %8phC generation changed\n", 2835 __func__, fcport->port_name); 2836 return; 2837 } else if (ea->sp->gen1 != fcport->rscn_gen) { 2838 return; 2839 } 2840 2841 qla_post_iidma_work(vha, fcport); 2842 } 2843 2844 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) 2845 { 2846 struct scsi_qla_host *vha = sp->vha; 2847 struct qla_hw_data *ha = vha->hw; 2848 fc_port_t *fcport = sp->fcport; 2849 struct ct_sns_rsp *ct_rsp; 2850 struct event_arg ea; 2851 2852 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; 2853 2854 ql_dbg(ql_dbg_disc, vha, 0x2053, 2855 "Async done-%s res %x, WWPN %8phC \n", 2856 sp->name, res, fcport->port_name); 2857 2858 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 2859 2860 if (res == QLA_FUNCTION_TIMEOUT) 2861 goto done; 2862 2863 if (res == (DID_ERROR << 16)) { 2864 /* entry status error */ 2865 goto done; 2866 } else if (res) { 2867 if ((ct_rsp->header.reason_code == 2868 CT_REASON_INVALID_COMMAND_CODE) || 2869 (ct_rsp->header.reason_code == 2870 CT_REASON_COMMAND_UNSUPPORTED)) { 2871 ql_dbg(ql_dbg_disc, vha, 0x2019, 2872 "GPSC command unsupported, disabling query.\n"); 2873 ha->flags.gpsc_supported = 0; 2874 goto done; 2875 } 2876 } else { 2877 fcport->fp_speed = qla2x00_port_speed_capability( 2878 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2879 2880 ql_dbg(ql_dbg_disc, vha, 0x2054, 2881 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", 2882 sp->name, fcport->fabric_port_name, 2883 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 2884 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 2885 } 2886 memset(&ea, 0, sizeof(ea)); 2887 ea.rc = res; 2888 ea.fcport = fcport; 2889 ea.sp = sp; 2890 qla24xx_handle_gpsc_event(vha, &ea); 2891 2892 done: 2893 /* ref: INIT */ 2894 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2895 } 2896 2897 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) 2898 { 2899 int rval = QLA_FUNCTION_FAILED; 2900 struct ct_sns_req *ct_req; 2901 srb_t *sp; 2902 2903 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 2904 return rval; 2905 2906 /* ref: INIT */ 2907 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2908 if (!sp) 2909 goto done; 2910 2911 sp->type = SRB_CT_PTHRU_CMD; 2912 sp->name = "gpsc"; 2913 sp->gen1 = fcport->rscn_gen; 2914 sp->gen2 = fcport->login_gen; 2915 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 2916 qla24xx_async_gpsc_sp_done); 2917 2918 /* CT_IU preamble */ 2919 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, 2920 GPSC_RSP_SIZE); 2921 2922 /* GPSC req */ 2923 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, 2924 WWN_SIZE); 2925 2926 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; 2927 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; 2928 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; 2929 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; 2930 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; 2931 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; 2932 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; 2933 2934 ql_dbg(ql_dbg_disc, vha, 0x205e, 2935 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", 2936 sp->name, fcport->port_name, sp->handle, 2937 fcport->loop_id, fcport->d_id.b.domain, 2938 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2939 2940 rval = qla2x00_start_sp(sp); 2941 if (rval != QLA_SUCCESS) 2942 goto done_free_sp; 2943 return rval; 2944 2945 done_free_sp: 2946 /* ref: INIT */ 2947 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2948 done: 2949 return rval; 2950 } 2951 2952 int 
qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
{
	struct qla_work_evt *e;

	if (test_bit(UNLOADING, &vha->dpc_flags) ||
	    (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.gpnid.id = *id;
	return qla2x00_post_work(vha, e);
}

void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
	struct srb_iocb *c = &sp->u.iocb_cmd;

	switch (sp->type) {
	case SRB_ELS_DCMD:
		qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
		break;
	case SRB_CT_PTHRU_CMD:
	default:
		if (sp->u.iocb_cmd.u.ctarg.req) {
			dma_free_coherent(&vha->hw->pdev->dev,
			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
			    sp->u.iocb_cmd.u.ctarg.req,
			    sp->u.iocb_cmd.u.ctarg.req_dma);
			sp->u.iocb_cmd.u.ctarg.req = NULL;
		}

		if (sp->u.iocb_cmd.u.ctarg.rsp) {
			dma_free_coherent(&vha->hw->pdev->dev,
			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
			    sp->u.iocb_cmd.u.ctarg.rsp,
			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
		}
		break;
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport, *conflict, *t;
	u16 data[2];

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %d port_id: %06x\n",
	    __func__, __LINE__, ea->id.b24);

	if (ea->rc) {
		/* cable is disconnected */
		list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
			if (fcport->d_id.b24 == ea->id.b24)
				fcport->scan_state = QLA_FCPORT_SCAN;

			qlt_schedule_sess_for_deletion(fcport);
		}
	} else {
		/* cable is connected */
		fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
		if (fcport) {
			list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
			    list) {
				if ((conflict->d_id.b24 == ea->id.b24) &&
				    (fcport != conflict)) {
					/*
					 * 2 fcports with conflict Nport ID or
					 * an existing fcport is having nport ID
					 * conflict with new fcport.
					 */
					conflict->scan_state = QLA_FCPORT_SCAN;

					qlt_schedule_sess_for_deletion(conflict);
				}
			}

			fcport->scan_needed = 0;
			fcport->rscn_gen++;
			fcport->scan_state = QLA_FCPORT_FOUND;
			fcport->flags |= FCF_FABRIC_DEVICE;
			if (fcport->login_retry == 0) {
				fcport->login_retry =
					vha->hw->login_retry_count;
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
				    fcport->port_name, fcport->loop_id,
				    fcport->login_retry);
			}
			switch (fcport->disc_state) {
			case DSC_LOGIN_COMPLETE:
				/* recheck session is still intact.
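				 * (an ADISC to the same WWPN/N_Port ID
				 * confirms the session without a disruptive
				 * relogin)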
*/ 3051 ql_dbg(ql_dbg_disc, vha, 0x210d, 3052 "%s %d %8phC revalidate session with ADISC\n", 3053 __func__, __LINE__, fcport->port_name); 3054 data[0] = data[1] = 0; 3055 qla2x00_post_async_adisc_work(vha, fcport, 3056 data); 3057 break; 3058 case DSC_DELETED: 3059 ql_dbg(ql_dbg_disc, vha, 0x210d, 3060 "%s %d %8phC login\n", __func__, __LINE__, 3061 fcport->port_name); 3062 fcport->d_id = ea->id; 3063 qla24xx_fcport_handle_login(vha, fcport); 3064 break; 3065 case DSC_DELETE_PEND: 3066 fcport->d_id = ea->id; 3067 break; 3068 default: 3069 fcport->d_id = ea->id; 3070 break; 3071 } 3072 } else { 3073 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3074 list) { 3075 if (conflict->d_id.b24 == ea->id.b24) { 3076 /* 2 fcports with conflict Nport ID or 3077 * an existing fcport is having nport ID 3078 * conflict with new fcport. 3079 */ 3080 ql_dbg(ql_dbg_disc, vha, 0xffff, 3081 "%s %d %8phC DS %d\n", 3082 __func__, __LINE__, 3083 conflict->port_name, 3084 conflict->disc_state); 3085 3086 conflict->scan_state = QLA_FCPORT_SCAN; 3087 qlt_schedule_sess_for_deletion(conflict); 3088 } 3089 } 3090 3091 /* create new fcport */ 3092 ql_dbg(ql_dbg_disc, vha, 0x2065, 3093 "%s %d %8phC post new sess\n", 3094 __func__, __LINE__, ea->port_name); 3095 qla24xx_post_newsess_work(vha, &ea->id, 3096 ea->port_name, NULL, NULL, 0); 3097 } 3098 } 3099 } 3100 3101 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) 3102 { 3103 struct scsi_qla_host *vha = sp->vha; 3104 struct ct_sns_req *ct_req = 3105 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3106 struct ct_sns_rsp *ct_rsp = 3107 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3108 struct event_arg ea; 3109 struct qla_work_evt *e; 3110 unsigned long flags; 3111 3112 if (res) 3113 ql_dbg(ql_dbg_disc, vha, 0x2066, 3114 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", 3115 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, 3116 ct_rsp->rsp.gpn_id.port_name); 3117 else 3118 ql_dbg(ql_dbg_disc, vha, 0x2066, 3119 "Async done-%s good rscn gen %d ID %3phC. %8phC\n", 3120 sp->name, sp->gen1, &ct_req->req.port_id.port_id, 3121 ct_rsp->rsp.gpn_id.port_name); 3122 3123 memset(&ea, 0, sizeof(ea)); 3124 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 3125 ea.sp = sp; 3126 ea.id = be_to_port_id(ct_req->req.port_id.port_id); 3127 ea.rc = res; 3128 3129 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3130 list_del(&sp->elem); 3131 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3132 3133 if (res) { 3134 if (res == QLA_FUNCTION_TIMEOUT) { 3135 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3136 /* ref: INIT */ 3137 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3138 return; 3139 } 3140 } else if (sp->gen1) { 3141 /* There was another RSCN for this Nport ID */ 3142 qla24xx_post_gpnid_work(sp->vha, &ea.id); 3143 /* ref: INIT */ 3144 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3145 return; 3146 } 3147 3148 qla24xx_handle_gpnid_event(vha, &ea); 3149 3150 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 3151 if (!e) { 3152 /* please ignore kernel warning. otherwise, we have mem leak. 
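		 * (dma_free_coherent() may warn when called from this
		 * completion context, but skipping the free here would leak
		 * the CT buffers since no UNMAP work item could be queued)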
*/ 3153 dma_free_coherent(&vha->hw->pdev->dev, 3154 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3155 sp->u.iocb_cmd.u.ctarg.req, 3156 sp->u.iocb_cmd.u.ctarg.req_dma); 3157 sp->u.iocb_cmd.u.ctarg.req = NULL; 3158 3159 dma_free_coherent(&vha->hw->pdev->dev, 3160 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3161 sp->u.iocb_cmd.u.ctarg.rsp, 3162 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3163 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3164 3165 /* ref: INIT */ 3166 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3167 return; 3168 } 3169 3170 e->u.iosb.sp = sp; 3171 qla2x00_post_work(vha, e); 3172 } 3173 3174 /* Get WWPN with Nport ID. */ 3175 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) 3176 { 3177 int rval = QLA_FUNCTION_FAILED; 3178 struct ct_sns_req *ct_req; 3179 srb_t *sp, *tsp; 3180 struct ct_sns_pkt *ct_sns; 3181 unsigned long flags; 3182 3183 if (!vha->flags.online) 3184 goto done; 3185 3186 /* ref: INIT */ 3187 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 3188 if (!sp) 3189 goto done; 3190 3191 sp->type = SRB_CT_PTHRU_CMD; 3192 sp->name = "gpnid"; 3193 sp->u.iocb_cmd.u.ctarg.id = *id; 3194 sp->gen1 = 0; 3195 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3196 qla2x00_async_gpnid_sp_done); 3197 3198 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3199 list_for_each_entry(tsp, &vha->gpnid_list, elem) { 3200 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { 3201 tsp->gen1++; 3202 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3203 /* ref: INIT */ 3204 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3205 goto done; 3206 } 3207 } 3208 list_add_tail(&sp->elem, &vha->gpnid_list); 3209 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3210 3211 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 3212 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 3213 GFP_KERNEL); 3214 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 3215 if (!sp->u.iocb_cmd.u.ctarg.req) { 3216 ql_log(ql_log_warn, vha, 0xd041, 3217 "Failed to allocate ct_sns request.\n"); 3218 goto done_free_sp; 3219 } 3220 3221 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 3222 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, 3223 GFP_KERNEL); 3224 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 3225 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 3226 ql_log(ql_log_warn, vha, 0xd042, 3227 "Failed to allocate ct_sns request.\n"); 3228 goto done_free_sp; 3229 } 3230 3231 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; 3232 memset(ct_sns, 0, sizeof(*ct_sns)); 3233 3234 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3235 /* CT_IU preamble */ 3236 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); 3237 3238 /* GPN_ID req */ 3239 ct_req->req.port_id.port_id = port_id_to_be_id(*id); 3240 3241 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; 3242 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; 3243 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3244 3245 ql_dbg(ql_dbg_disc, vha, 0x2067, 3246 "Async-%s hdl=%x ID %3phC.\n", sp->name, 3247 sp->handle, &ct_req->req.port_id.port_id); 3248 3249 rval = qla2x00_start_sp(sp); 3250 if (rval != QLA_SUCCESS) 3251 goto done_free_sp; 3252 3253 return rval; 3254 3255 done_free_sp: 3256 spin_lock_irqsave(&vha->hw->vport_slock, flags); 3257 list_del(&sp->elem); 3258 spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 3259 3260 if (sp->u.iocb_cmd.u.ctarg.req) { 3261 dma_free_coherent(&vha->hw->pdev->dev, 3262 sizeof(struct ct_sns_pkt), 3263 
sp->u.iocb_cmd.u.ctarg.req, 3264 sp->u.iocb_cmd.u.ctarg.req_dma); 3265 sp->u.iocb_cmd.u.ctarg.req = NULL; 3266 } 3267 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3268 dma_free_coherent(&vha->hw->pdev->dev, 3269 sizeof(struct ct_sns_pkt), 3270 sp->u.iocb_cmd.u.ctarg.rsp, 3271 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3272 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3273 } 3274 /* ref: INIT */ 3275 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3276 done: 3277 return rval; 3278 } 3279 3280 3281 void qla24xx_async_gffid_sp_done(srb_t *sp, int res) 3282 { 3283 struct scsi_qla_host *vha = sp->vha; 3284 fc_port_t *fcport = sp->fcport; 3285 struct ct_sns_rsp *ct_rsp; 3286 uint8_t fc4_scsi_feat; 3287 uint8_t fc4_nvme_feat; 3288 3289 ql_dbg(ql_dbg_disc, vha, 0x2133, 3290 "Async done-%s res %x ID %x. %8phC\n", 3291 sp->name, res, fcport->d_id.b24, fcport->port_name); 3292 3293 ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp; 3294 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 3295 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 3296 sp->rc = res; 3297 3298 /* 3299 * FC-GS-7, 5.2.3.12 FC-4 Features - format 3300 * The format of the FC-4 Features object, as defined by the FC-4, 3301 * Shall be an array of 4-bit values, one for each type code value 3302 */ 3303 if (!res) { 3304 if (fc4_scsi_feat & 0xf) { 3305 /* w1 b00:03 */ 3306 fcport->fc4_type = FS_FC4TYPE_FCP; 3307 fcport->fc4_features = fc4_scsi_feat & 0xf; 3308 } 3309 3310 if (fc4_nvme_feat & 0xf) { 3311 /* w5 [00:03]/28h */ 3312 fcport->fc4_type |= FS_FC4TYPE_NVME; 3313 fcport->fc4_features = fc4_nvme_feat & 0xf; 3314 } 3315 } 3316 3317 if (sp->flags & SRB_WAKEUP_ON_COMP) { 3318 complete(sp->comp); 3319 } else { 3320 if (sp->u.iocb_cmd.u.ctarg.req) { 3321 dma_free_coherent(&vha->hw->pdev->dev, 3322 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3323 sp->u.iocb_cmd.u.ctarg.req, 3324 sp->u.iocb_cmd.u.ctarg.req_dma); 3325 sp->u.iocb_cmd.u.ctarg.req = NULL; 3326 } 3327 3328 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3329 dma_free_coherent(&vha->hw->pdev->dev, 3330 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3331 sp->u.iocb_cmd.u.ctarg.rsp, 3332 sp->u.iocb_cmd.u.ctarg.rsp_dma); 3333 sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3334 } 3335 3336 /* ref: INIT */ 3337 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3338 /* we should not be here */ 3339 dump_stack(); 3340 } 3341 } 3342 3343 /* Get FC4 Feature with Nport ID. 
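 * Synchronous variant: the caller must pass wait == true; the routine
 * blocks on the SRB completion and frees its CT buffers before returning.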
 */
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;
	DECLARE_COMPLETION_ONSTACK(comp);

	/* this routine does not have handling for no wait */
	if (!vha->flags.online || !wait)
		return rval;

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gffid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla24xx_async_gffid_sp_done);
	sp->comp = &comp;
	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;

	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
	    &sp->u.iocb_cmd.u.ctarg.req_dma,
	    GFP_KERNEL);
	if (!sp->u.iocb_cmd.u.ctarg.req) {
		ql_log(ql_log_warn, vha, 0xd041,
		    "%s: Failed to allocate ct_sns request.\n",
		    __func__);
		goto done_free_sp;
	}

	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
	    &sp->u.iocb_cmd.u.ctarg.rsp_dma,
	    GFP_KERNEL);
	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xd041,
		    "%s: Failed to allocate ct_sns response.\n",
		    __func__);
		goto done_free_sp;
	}

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD,
	    GFF_ID_RSP_SIZE);

	ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
	ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
	ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;

	sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	rval = qla2x00_start_sp(sp);

	if (rval != QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
		goto done_free_sp;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x3074,
		    "Async-%s hdl=%x portid %06x\n",
		    sp->name, sp->handle, fcport->d_id.b24);
	}

	wait_for_completion(sp->comp);
	rval = sp->rc;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}

	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return rval;
}

/* GPN_FT + GNN_FT */
static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags;
	u64 twwn;
	int rc = 0;

	if (!ha->num_vhosts)
		return 0;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		twwn = wwn_to_u64(vp->port_name);
		if (wwn ==
twwn) { 3459 rc = 1; 3460 break; 3461 } 3462 } 3463 spin_unlock_irqrestore(&ha->vport_slock, flags); 3464 3465 return rc; 3466 } 3467 3468 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) 3469 { 3470 fc_port_t *fcport; 3471 u32 i, rc; 3472 bool found; 3473 struct fab_scan_rp *rp, *trp; 3474 unsigned long flags; 3475 u8 recheck = 0; 3476 u16 dup = 0, dup_cnt = 0; 3477 3478 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3479 "%s enter\n", __func__); 3480 3481 if (sp->gen1 != vha->hw->base_qpair->chip_reset) { 3482 ql_dbg(ql_dbg_disc, vha, 0xffff, 3483 "%s scan stop due to chip reset %x/%x\n", 3484 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset); 3485 goto out; 3486 } 3487 3488 rc = sp->rc; 3489 if (rc) { 3490 vha->scan.scan_retry++; 3491 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 3492 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3493 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3494 goto out; 3495 } else { 3496 ql_dbg(ql_dbg_disc, vha, 0xffff, 3497 "%s: Fabric scan failed for %d retries.\n", 3498 __func__, vha->scan.scan_retry); 3499 /* 3500 * Unable to scan any rports. logout loop below 3501 * will unregister all sessions. 3502 */ 3503 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3504 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { 3505 fcport->scan_state = QLA_FCPORT_SCAN; 3506 if (fcport->loop_id == FC_NO_LOOP_ID) 3507 fcport->logout_on_delete = 0; 3508 else 3509 fcport->logout_on_delete = 1; 3510 } 3511 } 3512 goto login_logout; 3513 } 3514 } 3515 vha->scan.scan_retry = 0; 3516 3517 list_for_each_entry(fcport, &vha->vp_fcports, list) 3518 fcport->scan_state = QLA_FCPORT_SCAN; 3519 3520 for (i = 0; i < vha->hw->max_fibre_devices; i++) { 3521 u64 wwn; 3522 int k; 3523 3524 rp = &vha->scan.l[i]; 3525 found = false; 3526 3527 wwn = wwn_to_u64(rp->port_name); 3528 if (wwn == 0) 3529 continue; 3530 3531 /* Remove duplicate NPORT ID entries from switch data base */ 3532 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { 3533 trp = &vha->scan.l[k]; 3534 if (rp->id.b24 == trp->id.b24) { 3535 dup = 1; 3536 dup_cnt++; 3537 ql_dbg(ql_dbg_disc + ql_dbg_verbose, 3538 vha, 0xffff, 3539 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", 3540 rp->id.b24, rp->port_name, trp->port_name); 3541 memset(trp, 0, sizeof(*trp)); 3542 } 3543 } 3544 3545 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) 3546 continue; 3547 3548 /* Bypass reserved domain fields. */ 3549 if ((rp->id.b.domain & 0xf0) == 0xf0) 3550 continue; 3551 3552 /* Bypass virtual ports of the same host. */ 3553 if (qla2x00_is_a_vp(vha, wwn)) 3554 continue; 3555 3556 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3557 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) 3558 continue; 3559 fcport->scan_state = QLA_FCPORT_FOUND; 3560 fcport->last_rscn_gen = fcport->rscn_gen; 3561 fcport->fc4_type = rp->fc4type; 3562 found = true; 3563 3564 if (fcport->scan_needed) { 3565 if (NVME_PRIORITY(vha->hw, fcport)) 3566 fcport->do_prli_nvme = 1; 3567 else 3568 fcport->do_prli_nvme = 0; 3569 } 3570 3571 /* 3572 * If device was not a fabric device before. 
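			 * Clear any stale (local loop) loop_id so that a
			 * fabric login can assign a fresh handle.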
3573 */ 3574 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3575 qla2x00_clear_loop_id(fcport); 3576 fcport->flags |= FCF_FABRIC_DEVICE; 3577 } else if (fcport->d_id.b24 != rp->id.b24 || 3578 (fcport->scan_needed && 3579 fcport->port_type != FCT_INITIATOR && 3580 fcport->port_type != FCT_NVME_INITIATOR)) { 3581 qlt_schedule_sess_for_deletion(fcport); 3582 } 3583 fcport->d_id.b24 = rp->id.b24; 3584 fcport->scan_needed = 0; 3585 break; 3586 } 3587 3588 if (!found) { 3589 ql_dbg(ql_dbg_disc, vha, 0xffff, 3590 "%s %d %8phC post new sess\n", 3591 __func__, __LINE__, rp->port_name); 3592 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name, 3593 rp->node_name, NULL, rp->fc4type); 3594 } 3595 } 3596 3597 if (dup) { 3598 ql_log(ql_log_warn, vha, 0xffff, 3599 "Detected %d duplicate NPORT ID(s) from switch data base\n", 3600 dup_cnt); 3601 } 3602 3603 login_logout: 3604 /* 3605 * Logout all previous fabric dev marked lost, except FCP2 devices. 3606 */ 3607 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3608 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3609 fcport->scan_needed = 0; 3610 continue; 3611 } 3612 3613 if (fcport->scan_state != QLA_FCPORT_FOUND) { 3614 bool do_delete = false; 3615 3616 if (fcport->scan_needed && 3617 fcport->disc_state == DSC_LOGIN_PEND) { 3618 /* Cable got disconnected after we sent 3619 * a login. Do delete to prevent timeout. 3620 */ 3621 fcport->logout_on_delete = 1; 3622 do_delete = true; 3623 } 3624 3625 fcport->scan_needed = 0; 3626 if (((qla_dual_mode_enabled(vha) || 3627 qla_ini_mode_enabled(vha)) && 3628 atomic_read(&fcport->state) == FCS_ONLINE) || 3629 do_delete) { 3630 if (fcport->loop_id != FC_NO_LOOP_ID) { 3631 if (fcport->flags & FCF_FCP2_DEVICE) 3632 continue; 3633 3634 ql_log(ql_log_warn, vha, 0x20f0, 3635 "%s %d %8phC post del sess\n", 3636 __func__, __LINE__, 3637 fcport->port_name); 3638 3639 fcport->tgt_link_down_time = 0; 3640 qlt_schedule_sess_for_deletion(fcport); 3641 continue; 3642 } 3643 } 3644 } else { 3645 if (fcport->scan_needed || 3646 fcport->disc_state != DSC_LOGIN_COMPLETE) { 3647 if (fcport->login_retry == 0) { 3648 fcport->login_retry = 3649 vha->hw->login_retry_count; 3650 ql_dbg(ql_dbg_disc, vha, 0x20a3, 3651 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", 3652 fcport->port_name, fcport->loop_id, 3653 fcport->login_retry); 3654 } 3655 fcport->scan_needed = 0; 3656 qla24xx_fcport_handle_login(vha, fcport); 3657 } 3658 } 3659 } 3660 3661 recheck = 1; 3662 out: 3663 qla24xx_sp_unmap(vha, sp); 3664 spin_lock_irqsave(&vha->work_lock, flags); 3665 vha->scan.scan_flags &= ~SF_SCANNING; 3666 spin_unlock_irqrestore(&vha->work_lock, flags); 3667 3668 if (recheck) { 3669 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3670 if (fcport->scan_needed) { 3671 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3672 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3673 break; 3674 } 3675 } 3676 } 3677 } 3678 3679 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, 3680 srb_t *sp, int cmd) 3681 { 3682 struct qla_work_evt *e; 3683 3684 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) 3685 return QLA_PARAMETER_ERROR; 3686 3687 e = qla2x00_alloc_work(vha, cmd); 3688 if (!e) 3689 return QLA_FUNCTION_FAILED; 3690 3691 e->u.iosb.sp = sp; 3692 3693 return qla2x00_post_work(vha, e); 3694 } 3695 3696 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, 3697 srb_t *sp, int cmd) 3698 { 3699 struct qla_work_evt *e; 3700 3701 if (cmd != QLA_EVT_GPNFT) 3702 return QLA_PARAMETER_ERROR; 3703 3704 e = 
qla2x00_alloc_work(vha, cmd); 3705 if (!e) 3706 return QLA_FUNCTION_FAILED; 3707 3708 e->u.gpnft.fc4_type = FC4_TYPE_NVME; 3709 e->u.gpnft.sp = sp; 3710 3711 return qla2x00_post_work(vha, e); 3712 } 3713 3714 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, 3715 struct srb *sp) 3716 { 3717 struct qla_hw_data *ha = vha->hw; 3718 int num_fibre_dev = ha->max_fibre_devices; 3719 struct ct_sns_req *ct_req = 3720 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3721 struct ct_sns_gpnft_rsp *ct_rsp = 3722 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3723 struct ct_sns_gpn_ft_data *d; 3724 struct fab_scan_rp *rp; 3725 u16 cmd = be16_to_cpu(ct_req->command); 3726 u8 fc4_type = sp->gen2; 3727 int i, j, k; 3728 port_id_t id; 3729 u8 found; 3730 u64 wwn; 3731 3732 j = 0; 3733 for (i = 0; i < num_fibre_dev; i++) { 3734 d = &ct_rsp->entries[i]; 3735 3736 id.b.rsvd_1 = 0; 3737 id.b.domain = d->port_id[0]; 3738 id.b.area = d->port_id[1]; 3739 id.b.al_pa = d->port_id[2]; 3740 wwn = wwn_to_u64(d->port_name); 3741 3742 if (id.b24 == 0 || wwn == 0) 3743 continue; 3744 3745 if (fc4_type == FC4_TYPE_FCP_SCSI) { 3746 if (cmd == GPN_FT_CMD) { 3747 rp = &vha->scan.l[j]; 3748 rp->id = id; 3749 memcpy(rp->port_name, d->port_name, 8); 3750 j++; 3751 rp->fc4type = FS_FC4TYPE_FCP; 3752 } else { 3753 for (k = 0; k < num_fibre_dev; k++) { 3754 rp = &vha->scan.l[k]; 3755 if (id.b24 == rp->id.b24) { 3756 memcpy(rp->node_name, 3757 d->port_name, 8); 3758 break; 3759 } 3760 } 3761 } 3762 } else { 3763 /* Search if the fibre device supports FC4_TYPE_NVME */ 3764 if (cmd == GPN_FT_CMD) { 3765 found = 0; 3766 3767 for (k = 0; k < num_fibre_dev; k++) { 3768 rp = &vha->scan.l[k]; 3769 if (!memcmp(rp->port_name, 3770 d->port_name, 8)) { 3771 /* 3772 * Supports FC-NVMe & FCP 3773 */ 3774 rp->fc4type |= FS_FC4TYPE_NVME; 3775 found = 1; 3776 break; 3777 } 3778 } 3779 3780 /* We found new FC-NVMe only port */ 3781 if (!found) { 3782 for (k = 0; k < num_fibre_dev; k++) { 3783 rp = &vha->scan.l[k]; 3784 if (wwn_to_u64(rp->port_name)) { 3785 continue; 3786 } else { 3787 rp->id = id; 3788 memcpy(rp->port_name, 3789 d->port_name, 8); 3790 rp->fc4type = 3791 FS_FC4TYPE_NVME; 3792 break; 3793 } 3794 } 3795 } 3796 } else { 3797 for (k = 0; k < num_fibre_dev; k++) { 3798 rp = &vha->scan.l[k]; 3799 if (id.b24 == rp->id.b24) { 3800 memcpy(rp->node_name, 3801 d->port_name, 8); 3802 break; 3803 } 3804 } 3805 } 3806 } 3807 } 3808 } 3809 3810 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) 3811 { 3812 struct scsi_qla_host *vha = sp->vha; 3813 struct ct_sns_req *ct_req = 3814 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3815 u16 cmd = be16_to_cpu(ct_req->command); 3816 u8 fc4_type = sp->gen2; 3817 unsigned long flags; 3818 int rc; 3819 3820 /* gen2 field is holding the fc4type */ 3821 ql_dbg(ql_dbg_disc, vha, 0xffff, 3822 "Async done-%s res %x FC4Type %x\n", 3823 sp->name, res, sp->gen2); 3824 3825 sp->rc = res; 3826 if (res) { 3827 unsigned long flags; 3828 const char *name = sp->name; 3829 3830 if (res == QLA_OS_TIMER_EXPIRED) { 3831 /* switch is ignoring all commands. 3832 * This might be a zone disable behavior. 3833 * This means we hit 64s timeout. 3834 * 22s GPNFT + 44s Abort = 64s 3835 */ 3836 ql_dbg(ql_dbg_disc, vha, 0xffff, 3837 "%s: Switch Zone check please .\n", 3838 name); 3839 qla2x00_mark_all_devices_lost(vha); 3840 } 3841 3842 /* 3843 * We are in an Interrupt context, queue up this 3844 * sp for GNNFT_DONE work. This will allow all 3845 * the resource to get freed up. 
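		 * (the GNNFT_DONE work runs qla24xx_async_gnnft_done(),
		 * which unmaps the CT buffers and drops the SRB reference
		 * in process context)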
static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct ct_sns_req *ct_req =
	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
	u16 cmd = be16_to_cpu(ct_req->command);
	u8 fc4_type = sp->gen2;
	unsigned long flags;
	int rc;

	/* gen2 field is holding the fc4type */
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x FC4Type %x\n",
	    sp->name, res, sp->gen2);

	sp->rc = res;
	if (res) {
		unsigned long flags;
		const char *name = sp->name;

		if (res == QLA_OS_TIMER_EXPIRED) {
			/* switch is ignoring all commands.
			 * This might be a zone disable behavior.
			 * This means we hit 64s timeout.
			 * 22s GPNFT + 44s Abort = 64s
			 */
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: please check switch zoning.\n",
			    name);
			qla2x00_mark_all_devices_lost(vha);
		}

		/*
		 * We are in an interrupt context, queue up this
		 * sp for GNNFT_DONE work. This will allow all
		 * the resources to get freed up.
		 */
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GNNFT_DONE);
		if (rc) {
			/* Cleanup here to prevent memory leak */
			qla24xx_sp_unmap(vha, sp);

			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			vha->scan.scan_retry++;
			spin_unlock_irqrestore(&vha->work_lock, flags);

			if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "Async done-%s rescan failed on all retries.\n",
				    name);
			}
		}
		return;
	}

	qla2x00_find_free_fcp_nvme_slot(vha, sp);

	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
	    cmd == GNN_FT_CMD) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		sp->rc = res;
		rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
		if (rc) {
			qla24xx_sp_unmap(vha, sp);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
		return;
	}

	if (cmd == GPN_FT_CMD) {
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GPNFT_DONE);
	} else {
		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
		    QLA_EVT_GNNFT_DONE);
	}

	if (rc) {
		qla24xx_sp_unmap(vha, sp);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return;
	}
}
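
/*
 * Fabric scan sequencing, as implemented below: qla24xx_async_gpnft() issues
 * GPN_FT for the requested FC4 type, its completion re-uses the same SRB to
 * issue GNN_FT (qla24xx_async_gpnft_done() -> qla24xx_async_gnnft()), and the
 * accumulated entries in vha->scan.l are then processed by the DONE work
 * events.
 */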
/*
 * Get the WWNN list for fc4_type.
 *
 * It is assumed the same SRB is re-used from GPN_FT to avoid
 * freeing and re-allocating memory.
 */
static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
    u8 fc4_type)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	unsigned long flags;

	if (!vha->flags.online) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		goto done_free_sp;
	}

	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: req %p rsp %p are not setup\n",
		    __func__, sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.rsp);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		WARN_ON(1);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
	    sp->u.iocb_cmd.u.ctarg.req_size);

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_gpnft_gnnft_sp_done);

	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
	    sp->u.iocb_cmd.u.ctarg.rsp_size);

	/* GNN_FT req (re-uses the gpn_ft request layout) */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
} /* GNNFT */

void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);
	qla24xx_async_gnnft(vha, sp, sp->gen2);
}
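
/*
 * Illustrative sketch only (not part of the driver source): one way a caller
 * in process context could kick off the FCP pass of the fabric scan using
 * qla24xx_async_gpnft() below. For the FCP pass the routine allocates its
 * own SRB and CT buffers, so @sp may be NULL.
 *
 *	if (qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, NULL) != QLA_SUCCESS)
 *		ql_dbg(ql_dbg_disc, vha, 0xffff,
 *		    "fabric scan could not be started\n");
 */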
/* Get WWPN list for certain fc4_type */
int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	u32 rspsz;
	unsigned long flags;

	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);

	if (!vha->flags.online)
		return rval;

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags & SF_SCANNING) {
		spin_unlock_irqrestore(&vha->work_lock, flags);
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: scan active\n", __func__);
		return rval;
	}
	vha->scan.scan_flags |= SF_SCANNING;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (fc4_type == FC4_TYPE_FCP_SCSI) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Performing FCP Scan\n", __func__);

		if (sp) {
			/* ref: INIT */
			kref_put(&sp->cmd_kref, qla2x00_sp_release);
		}

		/* ref: INIT */
		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
		if (!sp) {
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			return rval;
		}

		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    &sp->u.iocb_cmd.u.ctarg.req_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
		if (!sp->u.iocb_cmd.u.ctarg.req) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns request.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;

		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
		    ((vha->hw->max_fibre_devices - 1) *
		    sizeof(struct ct_sns_gpn_ft_data));

		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
		    rspsz,
		    &sp->u.iocb_cmd.u.ctarg.rsp_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns response.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			dma_free_coherent(&vha->hw->pdev->dev,
			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
			    sp->u.iocb_cmd.u.ctarg.req,
			    sp->u.iocb_cmd.u.ctarg.req_dma);
			sp->u.iocb_cmd.u.ctarg.req = NULL;
			/* ref: INIT */
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s scan list size %d\n", __func__, vha->scan.size);

		memset(vha->scan.l, 0, vha->scan.size);
	} else if (!sp) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "NVME scan did not provide SP\n");
		return rval;
	}

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gpnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_gpnft_gnnft_sp_done);

	rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);

	/* GPN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Scan scheduled.\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
}
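
/**
 * qla_scan_work_fn() - Delayed-work handler that re-arms a fabric rescan.
 * @work: delayed work embedded in struct fab_scan
 *
 * Sets LOCAL_LOOP_UPDATE and LOOP_RESYNC_NEEDED, wakes the DPC thread, and
 * clears SF_QUEUED so another rescan may be queued later.
 */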
void qla_scan_work_fn(struct work_struct *work)
{
	struct fab_scan *s = container_of(to_delayed_work(work),
	    struct fab_scan, scan_work);
	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
	    scan);
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: schedule loop resync\n", __func__);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_QUEUED;
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/* GNN_ID */
void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	qla24xx_post_gnl_work(vha, ea->fcport);
}

static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
	struct event_arg ea;
	u64 wwnn;

	fcport->flags &= ~FCF_ASYNC_SENT;
	wwnn = wwn_to_u64(node_name);
	if (wwnn)
		memcpy(fcport->node_name, node_name, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->node_name);

	qla24xx_handle_gnnid_event(vha, &ea);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_gnnid_sp_done);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
	    GNN_ID_RSP_SIZE);

	/* GNN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
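
/**
 * qla24xx_post_gnnid_work() - Queue a GNN_ID query for a port.
 * @vha: HA context
 * @fcport: port whose node name is to be queried
 *
 * Returns 0 without queueing work if the loop is not ready/up or the driver
 * is unloading; otherwise posts a QLA_EVT_GNNID work element.
 */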
int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* GFPN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		return;
	}

	qla24xx_post_gpsc_work(vha, fcport);
}

static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
	struct event_arg ea;
	u64 wwn;

	wwn = wwn_to_u64(fpn);
	if (wwn)
		memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->fabric_port_name);

	qla24xx_handle_gfpnid_event(vha, &ea);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gfpnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_gfpnid_sp_done);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
	    GFPN_ID_RSP_SIZE);

	/* GFPN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
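
/**
 * qla24xx_post_gfpnid_work() - Queue a GFPN_ID query for a port.
 * @vha: HA context
 * @fcport: port whose fabric port name is to be queried
 *
 * Returns 0 without queueing work if the loop is not ready/up or the driver
 * is unloading; otherwise posts a QLA_EVT_GFPNID work element.
 */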
int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}