/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_attr.h"

#define LPFC_DEF_DEVLOSS_TMO	30
#define LPFC_MIN_DEVLOSS_TMO	1
#define LPFC_MAX_DEVLOSS_TMO	255

#define LPFC_MAX_INFO_TMP_LEN	100
#define LPFC_INFO_MORE_STR	"\nCould be more info...\n"
/*
 * Write key size should be multiple of 4. If write key is changed
 * make sure that library write key is also changed.
 */
#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

const char *const trunk_errmsg[] = {	/* map errcode */
	"",	/* There is no such error code at index 0*/
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};

/**
 * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
 * @incr: integer to convert.
 * @hdw: ascii string holding converted integer plus a string terminator.
 *
 * Description:
 * JEDEC Joint Electron Device Engineering Council.
 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
 * character string. The string is then terminated with a NULL in byte 9.
 * Hex 0-9 becomes ascii '0' to '9'.
 * Hex a-f becomes ascii 'a' to 'f'.
 *
 * Notes:
 * Coded for 32 bit integers only.
 **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 + j;
		else
			hdw[7 - i] = 0x61 + j - 10;
		incr = (incr >> 4);
	}
	hdw[8] = 0;
	return;
}

static ssize_t
lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_cgn_info *cp = NULL;
	struct lpfc_cgn_stat *cgs;
	int len = 0;
	int cpu;
	u64 rcv, total;
	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};

	if (phba->cgn_i)
		cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	scnprintf(tmp, sizeof(tmp),
		  "Congestion Mgmt Info: E2Eattr %d Ver %d "
		  "CMF %d cnt %d\n",
		  phba->sli4_hba.pc_sli4_params.mi_cap,
		  cp ? cp->cgn_info_version : 0,
		  phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);

	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	if (!phba->sli4_hba.pc_sli4_params.cmf)
		goto buffer_done;

	switch (phba->cgn_init_reg_signal) {
	case EDC_CG_SIG_WARN_ONLY:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:WARN ");
		break;
	case EDC_CG_SIG_WARN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:WARN|ALARM ");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:NONE ");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_init_reg_fpin) {
	case LPFC_CGN_FPIN_WARN:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN\n");
		break;
	case LPFC_CGN_FPIN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:ALARM\n");
		break;
	case LPFC_CGN_FPIN_BOTH:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN|ALARM\n");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:NONE\n");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_reg_signal) {
	case EDC_CG_SIG_WARN_ONLY:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:WARN ");
		break;
	case EDC_CG_SIG_WARN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:WARN|ALARM ");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:NONE ");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_reg_fpin) {
	case LPFC_CGN_FPIN_WARN:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	case LPFC_CGN_FPIN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	case LPFC_CGN_FPIN_BOTH:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
		switch (phba->cmf_active_mode) {
		case LPFC_CFG_OFF:
			scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
			break;
		case
LPFC_CFG_MANAGED: 229 scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n"); 230 break; 231 case LPFC_CFG_MONITOR: 232 scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n"); 233 break; 234 default: 235 scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n"); 236 } 237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 238 goto buffer_done; 239 } 240 241 switch (phba->cgn_p.cgn_param_mode) { 242 case LPFC_CFG_OFF: 243 scnprintf(tmp, sizeof(tmp), "Config: Mode:Off "); 244 break; 245 case LPFC_CFG_MANAGED: 246 scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed "); 247 break; 248 case LPFC_CFG_MONITOR: 249 scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor "); 250 break; 251 default: 252 scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown "); 253 } 254 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 255 goto buffer_done; 256 257 total = 0; 258 rcv = 0; 259 for_each_present_cpu(cpu) { 260 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 261 total += atomic64_read(&cgs->total_bytes); 262 rcv += atomic64_read(&cgs->rcv_bytes); 263 } 264 265 scnprintf(tmp, sizeof(tmp), 266 "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n", 267 atomic_read(&phba->cmf_busy), 268 phba->cmf_active_info, rcv, total); 269 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 270 goto buffer_done; 271 272 scnprintf(tmp, sizeof(tmp), 273 "Port_speed:%d Link_byte_cnt:%ld " 274 "Max_byte_per_interval:%ld\n", 275 lpfc_sli_port_speed_get(phba), 276 (unsigned long)phba->cmf_link_byte_count, 277 (unsigned long)phba->cmf_max_bytes_per_interval); 278 strlcat(buf, tmp, PAGE_SIZE); 279 280 buffer_done: 281 len = strnlen(buf, PAGE_SIZE); 282 283 if (unlikely(len >= (PAGE_SIZE - 1))) { 284 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 285 "6312 Catching potential buffer " 286 "overflow > PAGE_SIZE = %lu bytes\n", 287 PAGE_SIZE); 288 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), 289 LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); 290 } 291 return len; 292 } 293 294 /** 295 * lpfc_drvr_version_show - Return the Emulex driver string with version number 296 * @dev: class unused variable. 297 * @attr: device attribute, not used. 298 * @buf: on return contains the module description text. 299 * 300 * Returns: size of formatted string. 301 **/ 302 static ssize_t 303 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, 304 char *buf) 305 { 306 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 307 } 308 309 /** 310 * lpfc_enable_fip_show - Return the fip mode of the HBA 311 * @dev: class unused variable. 312 * @attr: device attribute, not used. 313 * @buf: on return contains the module description text. 314 * 315 * Returns: size of formatted string. 
316 **/ 317 static ssize_t 318 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, 319 char *buf) 320 { 321 struct Scsi_Host *shost = class_to_shost(dev); 322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 323 struct lpfc_hba *phba = vport->phba; 324 325 if (phba->hba_flag & HBA_FIP_SUPPORT) 326 return scnprintf(buf, PAGE_SIZE, "1\n"); 327 else 328 return scnprintf(buf, PAGE_SIZE, "0\n"); 329 } 330 331 static ssize_t 332 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, 333 char *buf) 334 { 335 struct Scsi_Host *shost = class_to_shost(dev); 336 struct lpfc_vport *vport = shost_priv(shost); 337 struct lpfc_hba *phba = vport->phba; 338 struct lpfc_nvmet_tgtport *tgtp; 339 struct nvme_fc_local_port *localport; 340 struct lpfc_nvme_lport *lport; 341 struct lpfc_nvme_rport *rport; 342 struct lpfc_nodelist *ndlp; 343 struct nvme_fc_remote_port *nrport; 344 struct lpfc_fc4_ctrl_stat *cstat; 345 uint64_t data1, data2, data3; 346 uint64_t totin, totout, tot; 347 char *statep; 348 int i; 349 int len = 0; 350 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; 351 352 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 353 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); 354 return len; 355 } 356 if (phba->nvmet_support) { 357 if (!phba->targetport) { 358 len = scnprintf(buf, PAGE_SIZE, 359 "NVME Target: x%llx is not allocated\n", 360 wwn_to_u64(vport->fc_portname.u.wwn)); 361 return len; 362 } 363 /* Port state is only one of two values for now. */ 364 if (phba->targetport->port_id) 365 statep = "REGISTERED"; 366 else 367 statep = "INIT"; 368 scnprintf(tmp, sizeof(tmp), 369 "NVME Target Enabled State %s\n", 370 statep); 371 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 372 goto buffer_done; 373 374 scnprintf(tmp, sizeof(tmp), 375 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", 376 "NVME Target: lpfc", 377 phba->brd_no, 378 wwn_to_u64(vport->fc_portname.u.wwn), 379 wwn_to_u64(vport->fc_nodename.u.wwn), 380 phba->targetport->port_id); 381 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 382 goto buffer_done; 383 384 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) 385 >= PAGE_SIZE) 386 goto buffer_done; 387 388 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 389 scnprintf(tmp, sizeof(tmp), 390 "LS: Rcv %08x Drop %08x Abort %08x\n", 391 atomic_read(&tgtp->rcv_ls_req_in), 392 atomic_read(&tgtp->rcv_ls_req_drop), 393 atomic_read(&tgtp->xmt_ls_abort)); 394 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 395 goto buffer_done; 396 397 if (atomic_read(&tgtp->rcv_ls_req_in) != 398 atomic_read(&tgtp->rcv_ls_req_out)) { 399 scnprintf(tmp, sizeof(tmp), 400 "Rcv LS: in %08x != out %08x\n", 401 atomic_read(&tgtp->rcv_ls_req_in), 402 atomic_read(&tgtp->rcv_ls_req_out)); 403 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 404 goto buffer_done; 405 } 406 407 scnprintf(tmp, sizeof(tmp), 408 "LS: Xmt %08x Drop %08x Cmpl %08x\n", 409 atomic_read(&tgtp->xmt_ls_rsp), 410 atomic_read(&tgtp->xmt_ls_drop), 411 atomic_read(&tgtp->xmt_ls_rsp_cmpl)); 412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 413 goto buffer_done; 414 415 scnprintf(tmp, sizeof(tmp), 416 "LS: RSP Abort %08x xb %08x Err %08x\n", 417 atomic_read(&tgtp->xmt_ls_rsp_aborted), 418 atomic_read(&tgtp->xmt_ls_rsp_xb_set), 419 atomic_read(&tgtp->xmt_ls_rsp_error)); 420 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 421 goto buffer_done; 422 423 scnprintf(tmp, sizeof(tmp), 424 "FCP: Rcv %08x Defer %08x Release %08x " 425 "Drop %08x\n", 426 atomic_read(&tgtp->rcv_fcp_cmd_in), 427 
atomic_read(&tgtp->rcv_fcp_cmd_defer), 428 atomic_read(&tgtp->xmt_fcp_release), 429 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 430 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 431 goto buffer_done; 432 433 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 434 atomic_read(&tgtp->rcv_fcp_cmd_out)) { 435 scnprintf(tmp, sizeof(tmp), 436 "Rcv FCP: in %08x != out %08x\n", 437 atomic_read(&tgtp->rcv_fcp_cmd_in), 438 atomic_read(&tgtp->rcv_fcp_cmd_out)); 439 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 440 goto buffer_done; 441 } 442 443 scnprintf(tmp, sizeof(tmp), 444 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " 445 "drop %08x\n", 446 atomic_read(&tgtp->xmt_fcp_read), 447 atomic_read(&tgtp->xmt_fcp_read_rsp), 448 atomic_read(&tgtp->xmt_fcp_write), 449 atomic_read(&tgtp->xmt_fcp_rsp), 450 atomic_read(&tgtp->xmt_fcp_drop)); 451 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 452 goto buffer_done; 453 454 scnprintf(tmp, sizeof(tmp), 455 "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 456 atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 457 atomic_read(&tgtp->xmt_fcp_rsp_error), 458 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 459 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 460 goto buffer_done; 461 462 scnprintf(tmp, sizeof(tmp), 463 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", 464 atomic_read(&tgtp->xmt_fcp_rsp_aborted), 465 atomic_read(&tgtp->xmt_fcp_rsp_xb_set), 466 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); 467 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 468 goto buffer_done; 469 470 scnprintf(tmp, sizeof(tmp), 471 "ABORT: Xmt %08x Cmpl %08x\n", 472 atomic_read(&tgtp->xmt_fcp_abort), 473 atomic_read(&tgtp->xmt_fcp_abort_cmpl)); 474 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 475 goto buffer_done; 476 477 scnprintf(tmp, sizeof(tmp), 478 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", 479 atomic_read(&tgtp->xmt_abort_sol), 480 atomic_read(&tgtp->xmt_abort_unsol), 481 atomic_read(&tgtp->xmt_abort_rsp), 482 atomic_read(&tgtp->xmt_abort_rsp_error)); 483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 484 goto buffer_done; 485 486 scnprintf(tmp, sizeof(tmp), 487 "DELAY: ctx %08x fod %08x wqfull %08x\n", 488 atomic_read(&tgtp->defer_ctx), 489 atomic_read(&tgtp->defer_fod), 490 atomic_read(&tgtp->defer_wqfull)); 491 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 492 goto buffer_done; 493 494 /* Calculate outstanding IOs */ 495 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); 496 tot += atomic_read(&tgtp->xmt_fcp_release); 497 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 498 499 scnprintf(tmp, sizeof(tmp), 500 "IO_CTX: %08x WAIT: cur %08x tot %08x\n" 501 "CTX Outstanding %08llx\n\n", 502 phba->sli4_hba.nvmet_xri_cnt, 503 phba->sli4_hba.nvmet_io_wait_cnt, 504 phba->sli4_hba.nvmet_io_wait_total, 505 tot); 506 strlcat(buf, tmp, PAGE_SIZE); 507 goto buffer_done; 508 } 509 510 localport = vport->localport; 511 if (!localport) { 512 len = scnprintf(buf, PAGE_SIZE, 513 "NVME Initiator x%llx is not allocated\n", 514 wwn_to_u64(vport->fc_portname.u.wwn)); 515 return len; 516 } 517 lport = (struct lpfc_nvme_lport *)localport->private; 518 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) 519 goto buffer_done; 520 521 scnprintf(tmp, sizeof(tmp), 522 "XRI Dist lpfc%d Total %d IO %d ELS %d\n", 523 phba->brd_no, 524 phba->sli4_hba.max_cfg_param.max_xri, 525 phba->sli4_hba.io_xri_max, 526 lpfc_sli4_get_els_iocb_cnt(phba)); 527 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 528 goto buffer_done; 529 530 /* Port state is only one of two values for now. 
*/ 531 if (localport->port_id) 532 statep = "ONLINE"; 533 else 534 statep = "UNKNOWN "; 535 536 scnprintf(tmp, sizeof(tmp), 537 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", 538 "NVME LPORT lpfc", 539 phba->brd_no, 540 wwn_to_u64(vport->fc_portname.u.wwn), 541 wwn_to_u64(vport->fc_nodename.u.wwn), 542 localport->port_id, statep); 543 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 544 goto buffer_done; 545 546 spin_lock_irq(shost->host_lock); 547 548 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 549 nrport = NULL; 550 spin_lock(&ndlp->lock); 551 rport = lpfc_ndlp_get_nrport(ndlp); 552 if (rport) 553 nrport = rport->remoteport; 554 spin_unlock(&ndlp->lock); 555 if (!nrport) 556 continue; 557 558 /* Port state is only one of two values for now. */ 559 switch (nrport->port_state) { 560 case FC_OBJSTATE_ONLINE: 561 statep = "ONLINE"; 562 break; 563 case FC_OBJSTATE_UNKNOWN: 564 statep = "UNKNOWN "; 565 break; 566 default: 567 statep = "UNSUPPORTED"; 568 break; 569 } 570 571 /* Tab in to show lport ownership. */ 572 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) 573 goto unlock_buf_done; 574 if (phba->brd_no >= 10) { 575 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) 576 goto unlock_buf_done; 577 } 578 579 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", 580 nrport->port_name); 581 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 582 goto unlock_buf_done; 583 584 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", 585 nrport->node_name); 586 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 587 goto unlock_buf_done; 588 589 scnprintf(tmp, sizeof(tmp), "DID x%06x ", 590 nrport->port_id); 591 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 592 goto unlock_buf_done; 593 594 /* An NVME rport can have multiple roles. */ 595 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { 596 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) 597 goto unlock_buf_done; 598 } 599 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { 600 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) 601 goto unlock_buf_done; 602 } 603 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { 604 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) 605 goto unlock_buf_done; 606 } 607 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | 608 FC_PORT_ROLE_NVME_TARGET | 609 FC_PORT_ROLE_NVME_DISCOVERY)) { 610 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", 611 nrport->port_role); 612 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 613 goto unlock_buf_done; 614 } 615 616 scnprintf(tmp, sizeof(tmp), "%s\n", statep); 617 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 618 goto unlock_buf_done; 619 } 620 spin_unlock_irq(shost->host_lock); 621 622 if (!lport) 623 goto buffer_done; 624 625 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) 626 goto buffer_done; 627 628 scnprintf(tmp, sizeof(tmp), 629 "LS: Xmt %010x Cmpl %010x Abort %08x\n", 630 atomic_read(&lport->fc4NvmeLsRequests), 631 atomic_read(&lport->fc4NvmeLsCmpls), 632 atomic_read(&lport->xmt_ls_abort)); 633 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 634 goto buffer_done; 635 636 scnprintf(tmp, sizeof(tmp), 637 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", 638 atomic_read(&lport->xmt_ls_err), 639 atomic_read(&lport->cmpl_ls_xb), 640 atomic_read(&lport->cmpl_ls_err)); 641 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 642 goto buffer_done; 643 644 totin = 0; 645 totout = 0; 646 for (i = 0; i < phba->cfg_hdw_queue; i++) { 647 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; 648 tot = cstat->io_cmpls; 649 totin += tot; 650 data1 = cstat->input_requests; 651 data2 
= cstat->output_requests; 652 data3 = cstat->control_requests; 653 totout += (data1 + data2 + data3); 654 } 655 scnprintf(tmp, sizeof(tmp), 656 "Total FCP Cmpl %016llx Issue %016llx " 657 "OutIO %016llx\n", 658 totin, totout, totout - totin); 659 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 660 goto buffer_done; 661 662 scnprintf(tmp, sizeof(tmp), 663 "\tabort %08x noxri %08x nondlp %08x qdepth %08x " 664 "wqerr %08x err %08x\n", 665 atomic_read(&lport->xmt_fcp_abort), 666 atomic_read(&lport->xmt_fcp_noxri), 667 atomic_read(&lport->xmt_fcp_bad_ndlp), 668 atomic_read(&lport->xmt_fcp_qdepth), 669 atomic_read(&lport->xmt_fcp_wqerr), 670 atomic_read(&lport->xmt_fcp_err)); 671 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 672 goto buffer_done; 673 674 scnprintf(tmp, sizeof(tmp), 675 "FCP CMPL: xb %08x Err %08x\n", 676 atomic_read(&lport->cmpl_fcp_xb), 677 atomic_read(&lport->cmpl_fcp_err)); 678 strlcat(buf, tmp, PAGE_SIZE); 679 680 /* host_lock is already unlocked. */ 681 goto buffer_done; 682 683 unlock_buf_done: 684 spin_unlock_irq(shost->host_lock); 685 686 buffer_done: 687 len = strnlen(buf, PAGE_SIZE); 688 689 if (unlikely(len >= (PAGE_SIZE - 1))) { 690 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 691 "6314 Catching potential buffer " 692 "overflow > PAGE_SIZE = %lu bytes\n", 693 PAGE_SIZE); 694 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), 695 LPFC_INFO_MORE_STR, 696 sizeof(LPFC_INFO_MORE_STR) + 1); 697 } 698 699 return len; 700 } 701 702 static ssize_t 703 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, 704 char *buf) 705 { 706 struct Scsi_Host *shost = class_to_shost(dev); 707 struct lpfc_vport *vport = shost_priv(shost); 708 struct lpfc_hba *phba = vport->phba; 709 int len; 710 struct lpfc_fc4_ctrl_stat *cstat; 711 u64 data1, data2, data3; 712 u64 tot, totin, totout; 713 int i; 714 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; 715 716 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || 717 (phba->sli_rev != LPFC_SLI_REV4)) 718 return 0; 719 720 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); 721 722 totin = 0; 723 totout = 0; 724 for (i = 0; i < phba->cfg_hdw_queue; i++) { 725 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; 726 tot = cstat->io_cmpls; 727 totin += tot; 728 data1 = cstat->input_requests; 729 data2 = cstat->output_requests; 730 data3 = cstat->control_requests; 731 totout += (data1 + data2 + data3); 732 733 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " 734 "IO %016llx ", i, data1, data2, data3); 735 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 736 goto buffer_done; 737 738 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", 739 tot, ((data1 + data2 + data3) - tot)); 740 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 741 goto buffer_done; 742 } 743 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " 744 "OutIO %016llx\n", totin, totout, totout - totin); 745 strlcat(buf, tmp, PAGE_SIZE); 746 747 buffer_done: 748 len = strnlen(buf, PAGE_SIZE); 749 750 return len; 751 } 752 753 static ssize_t 754 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, 755 char *buf) 756 { 757 struct Scsi_Host *shost = class_to_shost(dev); 758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 759 struct lpfc_hba *phba = vport->phba; 760 761 if (phba->cfg_enable_bg) { 762 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 763 return scnprintf(buf, PAGE_SIZE, 764 "BlockGuard Enabled\n"); 765 else 766 return scnprintf(buf, PAGE_SIZE, 767 "BlockGuard Not Supported\n"); 768 } else 769 return 
scnprintf(buf, PAGE_SIZE, 770 "BlockGuard Disabled\n"); 771 } 772 773 static ssize_t 774 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, 775 char *buf) 776 { 777 struct Scsi_Host *shost = class_to_shost(dev); 778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 779 struct lpfc_hba *phba = vport->phba; 780 781 return scnprintf(buf, PAGE_SIZE, "%llu\n", 782 (unsigned long long)phba->bg_guard_err_cnt); 783 } 784 785 static ssize_t 786 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, 787 char *buf) 788 { 789 struct Scsi_Host *shost = class_to_shost(dev); 790 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 791 struct lpfc_hba *phba = vport->phba; 792 793 return scnprintf(buf, PAGE_SIZE, "%llu\n", 794 (unsigned long long)phba->bg_apptag_err_cnt); 795 } 796 797 static ssize_t 798 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, 799 char *buf) 800 { 801 struct Scsi_Host *shost = class_to_shost(dev); 802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 803 struct lpfc_hba *phba = vport->phba; 804 805 return scnprintf(buf, PAGE_SIZE, "%llu\n", 806 (unsigned long long)phba->bg_reftag_err_cnt); 807 } 808 809 /** 810 * lpfc_info_show - Return some pci info about the host in ascii 811 * @dev: class converted to a Scsi_host structure. 812 * @attr: device attribute, not used. 813 * @buf: on return contains the formatted text from lpfc_info(). 814 * 815 * Returns: size of formatted string. 816 **/ 817 static ssize_t 818 lpfc_info_show(struct device *dev, struct device_attribute *attr, 819 char *buf) 820 { 821 struct Scsi_Host *host = class_to_shost(dev); 822 823 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); 824 } 825 826 /** 827 * lpfc_serialnum_show - Return the hba serial number in ascii 828 * @dev: class converted to a Scsi_host structure. 829 * @attr: device attribute, not used. 830 * @buf: on return contains the formatted text serial number. 831 * 832 * Returns: size of formatted string. 833 **/ 834 static ssize_t 835 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, 836 char *buf) 837 { 838 struct Scsi_Host *shost = class_to_shost(dev); 839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 840 struct lpfc_hba *phba = vport->phba; 841 842 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); 843 } 844 845 /** 846 * lpfc_temp_sensor_show - Return the temperature sensor level 847 * @dev: class converted to a Scsi_host structure. 848 * @attr: device attribute, not used. 849 * @buf: on return contains the formatted support level. 850 * 851 * Description: 852 * Returns a number indicating the temperature sensor level currently 853 * supported, zero or one in ascii. 854 * 855 * Returns: size of formatted string. 856 **/ 857 static ssize_t 858 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, 859 char *buf) 860 { 861 struct Scsi_Host *shost = class_to_shost(dev); 862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 863 struct lpfc_hba *phba = vport->phba; 864 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); 865 } 866 867 /** 868 * lpfc_modeldesc_show - Return the model description of the hba 869 * @dev: class converted to a Scsi_host structure. 870 * @attr: device attribute, not used. 871 * @buf: on return contains the scsi vpd model description. 872 * 873 * Returns: size of formatted string. 
874 **/ 875 static ssize_t 876 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, 877 char *buf) 878 { 879 struct Scsi_Host *shost = class_to_shost(dev); 880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 881 struct lpfc_hba *phba = vport->phba; 882 883 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); 884 } 885 886 /** 887 * lpfc_modelname_show - Return the model name of the hba 888 * @dev: class converted to a Scsi_host structure. 889 * @attr: device attribute, not used. 890 * @buf: on return contains the scsi vpd model name. 891 * 892 * Returns: size of formatted string. 893 **/ 894 static ssize_t 895 lpfc_modelname_show(struct device *dev, struct device_attribute *attr, 896 char *buf) 897 { 898 struct Scsi_Host *shost = class_to_shost(dev); 899 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 900 struct lpfc_hba *phba = vport->phba; 901 902 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); 903 } 904 905 /** 906 * lpfc_programtype_show - Return the program type of the hba 907 * @dev: class converted to a Scsi_host structure. 908 * @attr: device attribute, not used. 909 * @buf: on return contains the scsi vpd program type. 910 * 911 * Returns: size of formatted string. 912 **/ 913 static ssize_t 914 lpfc_programtype_show(struct device *dev, struct device_attribute *attr, 915 char *buf) 916 { 917 struct Scsi_Host *shost = class_to_shost(dev); 918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 919 struct lpfc_hba *phba = vport->phba; 920 921 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); 922 } 923 924 /** 925 * lpfc_vportnum_show - Return the port number in ascii of the hba 926 * @dev: class converted to a Scsi_host structure. 927 * @attr: device attribute, not used. 928 * @buf: on return contains scsi vpd program type. 929 * 930 * Returns: size of formatted string. 931 **/ 932 static ssize_t 933 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, 934 char *buf) 935 { 936 struct Scsi_Host *shost = class_to_shost(dev); 937 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 938 struct lpfc_hba *phba = vport->phba; 939 940 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); 941 } 942 943 /** 944 * lpfc_fwrev_show - Return the firmware rev running in the hba 945 * @dev: class converted to a Scsi_host structure. 946 * @attr: device attribute, not used. 947 * @buf: on return contains the scsi vpd program type. 948 * 949 * Returns: size of formatted string. 950 **/ 951 static ssize_t 952 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, 953 char *buf) 954 { 955 struct Scsi_Host *shost = class_to_shost(dev); 956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 957 struct lpfc_hba *phba = vport->phba; 958 uint32_t if_type; 959 uint8_t sli_family; 960 char fwrev[FW_REV_STR_SIZE]; 961 int len; 962 963 lpfc_decode_firmware_rev(phba, fwrev, 1); 964 if_type = phba->sli4_hba.pc_sli4_params.if_type; 965 sli_family = phba->sli4_hba.pc_sli4_params.sli_family; 966 967 if (phba->sli_rev < LPFC_SLI_REV4) 968 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", 969 fwrev, phba->sli_rev); 970 else 971 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", 972 fwrev, phba->sli_rev, if_type, sli_family); 973 974 return len; 975 } 976 977 /** 978 * lpfc_hdw_show - Return the jedec information about the hba 979 * @dev: class converted to a Scsi_host structure. 980 * @attr: device attribute, not used. 
981 * @buf: on return contains the scsi vpd program type. 982 * 983 * Returns: size of formatted string. 984 **/ 985 static ssize_t 986 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) 987 { 988 char hdw[9]; 989 struct Scsi_Host *shost = class_to_shost(dev); 990 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 991 struct lpfc_hba *phba = vport->phba; 992 lpfc_vpd_t *vp = &phba->vpd; 993 994 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 995 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, 996 vp->rev.smRev, vp->rev.smFwRev); 997 } 998 999 /** 1000 * lpfc_option_rom_version_show - Return the adapter ROM FCode version 1001 * @dev: class converted to a Scsi_host structure. 1002 * @attr: device attribute, not used. 1003 * @buf: on return contains the ROM and FCode ascii strings. 1004 * 1005 * Returns: size of formatted string. 1006 **/ 1007 static ssize_t 1008 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, 1009 char *buf) 1010 { 1011 struct Scsi_Host *shost = class_to_shost(dev); 1012 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1013 struct lpfc_hba *phba = vport->phba; 1014 char fwrev[FW_REV_STR_SIZE]; 1015 1016 if (phba->sli_rev < LPFC_SLI_REV4) 1017 return scnprintf(buf, PAGE_SIZE, "%s\n", 1018 phba->OptionROMVersion); 1019 1020 lpfc_decode_firmware_rev(phba, fwrev, 1); 1021 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); 1022 } 1023 1024 /** 1025 * lpfc_link_state_show - Return the link state of the port 1026 * @dev: class converted to a Scsi_host structure. 1027 * @attr: device attribute, not used. 1028 * @buf: on return contains text describing the state of the link. 1029 * 1030 * Notes: 1031 * The switch statement has no default so zero will be returned. 1032 * 1033 * Returns: size of formatted string. 
1034 **/ 1035 static ssize_t 1036 lpfc_link_state_show(struct device *dev, struct device_attribute *attr, 1037 char *buf) 1038 { 1039 struct Scsi_Host *shost = class_to_shost(dev); 1040 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1041 struct lpfc_hba *phba = vport->phba; 1042 int len = 0; 1043 1044 switch (phba->link_state) { 1045 case LPFC_LINK_UNKNOWN: 1046 case LPFC_WARM_START: 1047 case LPFC_INIT_START: 1048 case LPFC_INIT_MBX_CMDS: 1049 case LPFC_LINK_DOWN: 1050 case LPFC_HBA_ERROR: 1051 if (phba->hba_flag & LINK_DISABLED) 1052 len += scnprintf(buf + len, PAGE_SIZE-len, 1053 "Link Down - User disabled\n"); 1054 else 1055 len += scnprintf(buf + len, PAGE_SIZE-len, 1056 "Link Down\n"); 1057 break; 1058 case LPFC_LINK_UP: 1059 case LPFC_CLEAR_LA: 1060 case LPFC_HBA_READY: 1061 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); 1062 1063 switch (vport->port_state) { 1064 case LPFC_LOCAL_CFG_LINK: 1065 len += scnprintf(buf + len, PAGE_SIZE-len, 1066 "Configuring Link\n"); 1067 break; 1068 case LPFC_FDISC: 1069 case LPFC_FLOGI: 1070 case LPFC_FABRIC_CFG_LINK: 1071 case LPFC_NS_REG: 1072 case LPFC_NS_QRY: 1073 case LPFC_BUILD_DISC_LIST: 1074 case LPFC_DISC_AUTH: 1075 len += scnprintf(buf + len, PAGE_SIZE - len, 1076 "Discovery\n"); 1077 break; 1078 case LPFC_VPORT_READY: 1079 len += scnprintf(buf + len, PAGE_SIZE - len, 1080 "Ready\n"); 1081 break; 1082 1083 case LPFC_VPORT_FAILED: 1084 len += scnprintf(buf + len, PAGE_SIZE - len, 1085 "Failed\n"); 1086 break; 1087 1088 case LPFC_VPORT_UNKNOWN: 1089 len += scnprintf(buf + len, PAGE_SIZE - len, 1090 "Unknown\n"); 1091 break; 1092 } 1093 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 1094 if (vport->fc_flag & FC_PUBLIC_LOOP) 1095 len += scnprintf(buf + len, PAGE_SIZE-len, 1096 " Public Loop\n"); 1097 else 1098 len += scnprintf(buf + len, PAGE_SIZE-len, 1099 " Private Loop\n"); 1100 } else { 1101 if (vport->fc_flag & FC_FABRIC) { 1102 if (phba->sli_rev == LPFC_SLI_REV4 && 1103 vport->port_type == LPFC_PHYSICAL_PORT && 1104 phba->sli4_hba.fawwpn_flag & 1105 LPFC_FAWWPN_FABRIC) 1106 len += scnprintf(buf + len, 1107 PAGE_SIZE - len, 1108 " Fabric FA-PWWN\n"); 1109 else 1110 len += scnprintf(buf + len, 1111 PAGE_SIZE - len, 1112 " Fabric\n"); 1113 } else { 1114 len += scnprintf(buf + len, PAGE_SIZE-len, 1115 " Point-2-Point\n"); 1116 } 1117 } 1118 } 1119 1120 if ((phba->sli_rev == LPFC_SLI_REV4) && 1121 ((bf_get(lpfc_sli_intf_if_type, 1122 &phba->sli4_hba.sli_intf) == 1123 LPFC_SLI_INTF_IF_TYPE_6))) { 1124 struct lpfc_trunk_link link = phba->trunk_link; 1125 1126 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 1127 len += scnprintf(buf + len, PAGE_SIZE - len, 1128 "Trunk port 0: Link %s %s\n", 1129 (link.link0.state == LPFC_LINK_UP) ? 1130 "Up" : "Down. ", 1131 trunk_errmsg[link.link0.fault]); 1132 1133 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 1134 len += scnprintf(buf + len, PAGE_SIZE - len, 1135 "Trunk port 1: Link %s %s\n", 1136 (link.link1.state == LPFC_LINK_UP) ? 1137 "Up" : "Down. ", 1138 trunk_errmsg[link.link1.fault]); 1139 1140 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 1141 len += scnprintf(buf + len, PAGE_SIZE - len, 1142 "Trunk port 2: Link %s %s\n", 1143 (link.link2.state == LPFC_LINK_UP) ? 1144 "Up" : "Down. ", 1145 trunk_errmsg[link.link2.fault]); 1146 1147 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 1148 len += scnprintf(buf + len, PAGE_SIZE - len, 1149 "Trunk port 3: Link %s %s\n", 1150 (link.link3.state == LPFC_LINK_UP) ? 1151 "Up" : "Down. 
", 1152 trunk_errmsg[link.link3.fault]); 1153 1154 } 1155 1156 return len; 1157 } 1158 1159 /** 1160 * lpfc_sli4_protocol_show - Return the fip mode of the HBA 1161 * @dev: class unused variable. 1162 * @attr: device attribute, not used. 1163 * @buf: on return contains the module description text. 1164 * 1165 * Returns: size of formatted string. 1166 **/ 1167 static ssize_t 1168 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, 1169 char *buf) 1170 { 1171 struct Scsi_Host *shost = class_to_shost(dev); 1172 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1173 struct lpfc_hba *phba = vport->phba; 1174 1175 if (phba->sli_rev < LPFC_SLI_REV4) 1176 return scnprintf(buf, PAGE_SIZE, "fc\n"); 1177 1178 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { 1179 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) 1180 return scnprintf(buf, PAGE_SIZE, "fcoe\n"); 1181 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) 1182 return scnprintf(buf, PAGE_SIZE, "fc\n"); 1183 } 1184 return scnprintf(buf, PAGE_SIZE, "unknown\n"); 1185 } 1186 1187 /** 1188 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage 1189 * (OAS) is supported. 1190 * @dev: class unused variable. 1191 * @attr: device attribute, not used. 1192 * @buf: on return contains the module description text. 1193 * 1194 * Returns: size of formatted string. 1195 **/ 1196 static ssize_t 1197 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, 1198 char *buf) 1199 { 1200 struct Scsi_Host *shost = class_to_shost(dev); 1201 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 1202 struct lpfc_hba *phba = vport->phba; 1203 1204 return scnprintf(buf, PAGE_SIZE, "%d\n", 1205 phba->sli4_hba.pc_sli4_params.oas_supported); 1206 } 1207 1208 /** 1209 * lpfc_link_state_store - Transition the link_state on an HBA port 1210 * @dev: class device that is converted into a Scsi_host. 1211 * @attr: device attribute, not used. 1212 * @buf: one or more lpfc_polling_flags values. 1213 * @count: not used. 1214 * 1215 * Returns: 1216 * -EINVAL if the buffer is not "up" or "down" 1217 * return from link state change function if non-zero 1218 * length of the buf on success 1219 **/ 1220 static ssize_t 1221 lpfc_link_state_store(struct device *dev, struct device_attribute *attr, 1222 const char *buf, size_t count) 1223 { 1224 struct Scsi_Host *shost = class_to_shost(dev); 1225 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1226 struct lpfc_hba *phba = vport->phba; 1227 1228 int status = -EINVAL; 1229 1230 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && 1231 (phba->link_state == LPFC_LINK_DOWN)) 1232 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 1233 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && 1234 (phba->link_state >= LPFC_LINK_UP)) 1235 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); 1236 1237 if (status == 0) 1238 return strlen(buf); 1239 else 1240 return status; 1241 } 1242 1243 /** 1244 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports 1245 * @dev: class device that is converted into a Scsi_host. 1246 * @attr: device attribute, not used. 1247 * @buf: on return contains the sum of fc mapped and unmapped. 1248 * 1249 * Description: 1250 * Returns the ascii text number of the sum of the fc mapped and unmapped 1251 * vport counts. 1252 * 1253 * Returns: size of formatted string. 
1254 **/ 1255 static ssize_t 1256 lpfc_num_discovered_ports_show(struct device *dev, 1257 struct device_attribute *attr, char *buf) 1258 { 1259 struct Scsi_Host *shost = class_to_shost(dev); 1260 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1261 1262 return scnprintf(buf, PAGE_SIZE, "%d\n", 1263 vport->fc_map_cnt + vport->fc_unmap_cnt); 1264 } 1265 1266 /** 1267 * lpfc_issue_lip - Misnomer, name carried over from long ago 1268 * @shost: Scsi_Host pointer. 1269 * 1270 * Description: 1271 * Bring the link down gracefully then re-init the link. The firmware will 1272 * re-init the fiber channel interface as required. Does not issue a LIP. 1273 * 1274 * Returns: 1275 * -EPERM port offline or management commands are being blocked 1276 * -ENOMEM cannot allocate memory for the mailbox command 1277 * -EIO error sending the mailbox command 1278 * zero for success 1279 **/ 1280 static int 1281 lpfc_issue_lip(struct Scsi_Host *shost) 1282 { 1283 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1284 struct lpfc_hba *phba = vport->phba; 1285 LPFC_MBOXQ_t *pmboxq; 1286 int mbxstatus = MBXERR_ERROR; 1287 1288 /* 1289 * If the link is offline, disabled or BLOCK_MGMT_IO 1290 * it doesn't make any sense to allow issue_lip 1291 */ 1292 if ((vport->fc_flag & FC_OFFLINE_MODE) || 1293 (phba->hba_flag & LINK_DISABLED) || 1294 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) 1295 return -EPERM; 1296 1297 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 1298 1299 if (!pmboxq) 1300 return -ENOMEM; 1301 1302 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 1303 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1304 pmboxq->u.mb.mbxOwner = OWN_HOST; 1305 1306 if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) 1307 vport->fc_flag &= ~FC_PT2PT_NO_NVME; 1308 1309 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 1310 1311 if ((mbxstatus == MBX_SUCCESS) && 1312 (pmboxq->u.mb.mbxStatus == 0 || 1313 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { 1314 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 1315 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 1316 phba->cfg_link_speed); 1317 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 1318 phba->fc_ratov * 2); 1319 if ((mbxstatus == MBX_SUCCESS) && 1320 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 1321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1322 "2859 SLI authentication is required " 1323 "for INIT_LINK but has not done yet\n"); 1324 } 1325 1326 lpfc_set_loopback_flag(phba); 1327 if (mbxstatus != MBX_TIMEOUT) 1328 mempool_free(pmboxq, phba->mbox_mem_pool); 1329 1330 if (mbxstatus == MBXERR_ERROR) 1331 return -EIO; 1332 1333 return 0; 1334 } 1335 1336 int 1337 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) 1338 { 1339 int cnt = 0; 1340 1341 spin_lock_irq(lock); 1342 while (!list_empty(q)) { 1343 spin_unlock_irq(lock); 1344 msleep(20); 1345 if (cnt++ > 250) { /* 5 secs */ 1346 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1347 "0466 Outstanding IO when " 1348 "bringing Adapter offline\n"); 1349 return 0; 1350 } 1351 spin_lock_irq(lock); 1352 } 1353 spin_unlock_irq(lock); 1354 return 1; 1355 } 1356 1357 /** 1358 * lpfc_do_offline - Issues a mailbox command to bring the link down 1359 * @phba: lpfc_hba pointer. 1360 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. 1361 * 1362 * Notes: 1363 * Assumes any error from lpfc_do_offline() will be negative. 
1364 * Can wait up to 5 seconds for the port ring buffers count 1365 * to reach zero, prints a warning if it is not zero and continues. 1366 * lpfc_workq_post_event() returns a non-zero return code if call fails. 1367 * 1368 * Returns: 1369 * -EIO error posting the event 1370 * zero for success 1371 **/ 1372 static int 1373 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) 1374 { 1375 struct completion online_compl; 1376 struct lpfc_queue *qp = NULL; 1377 struct lpfc_sli_ring *pring; 1378 struct lpfc_sli *psli; 1379 int status = 0; 1380 int i; 1381 int rc; 1382 1383 init_completion(&online_compl); 1384 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1385 LPFC_EVT_OFFLINE_PREP); 1386 if (rc == 0) 1387 return -ENOMEM; 1388 1389 wait_for_completion(&online_compl); 1390 1391 if (status != 0) 1392 return -EIO; 1393 1394 psli = &phba->sli; 1395 1396 /* 1397 * If freeing the queues have already started, don't access them. 1398 * Otherwise set FREE_WAIT to indicate that queues are being used 1399 * to hold the freeing process until we finish. 1400 */ 1401 spin_lock_irq(&phba->hbalock); 1402 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) { 1403 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT; 1404 } else { 1405 spin_unlock_irq(&phba->hbalock); 1406 goto skip_wait; 1407 } 1408 spin_unlock_irq(&phba->hbalock); 1409 1410 /* Wait a little for things to settle down, but not 1411 * long enough for dev loss timeout to expire. 1412 */ 1413 if (phba->sli_rev != LPFC_SLI_REV4) { 1414 for (i = 0; i < psli->num_rings; i++) { 1415 pring = &psli->sli3_ring[i]; 1416 if (!lpfc_emptyq_wait(phba, &pring->txcmplq, 1417 &phba->hbalock)) 1418 goto out; 1419 } 1420 } else { 1421 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1422 pring = qp->pring; 1423 if (!pring) 1424 continue; 1425 if (!lpfc_emptyq_wait(phba, &pring->txcmplq, 1426 &pring->ring_lock)) 1427 goto out; 1428 } 1429 } 1430 out: 1431 spin_lock_irq(&phba->hbalock); 1432 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT; 1433 spin_unlock_irq(&phba->hbalock); 1434 1435 skip_wait: 1436 init_completion(&online_compl); 1437 rc = lpfc_workq_post_event(phba, &status, &online_compl, type); 1438 if (rc == 0) 1439 return -ENOMEM; 1440 1441 wait_for_completion(&online_compl); 1442 1443 if (status != 0) 1444 return -EIO; 1445 1446 return 0; 1447 } 1448 1449 /** 1450 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA 1451 * @phba: lpfc_hba pointer. 1452 * 1453 * Description: 1454 * Issues a PCI secondary bus reset for the phba->pcidev. 1455 * 1456 * Notes: 1457 * First walks the bus_list to ensure only PCI devices with Emulex 1458 * vendor id, device ids that support hot reset, only one occurrence 1459 * of function 0, and all ports on the bus are in offline mode to ensure the 1460 * hot reset only affects one valid HBA. 
1461 * 1462 * Returns: 1463 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2 1464 * -ENODEV, NULL ptr to pcidev 1465 * -EBADSLT, detected invalid device 1466 * -EBUSY, port is not in offline state 1467 * 0, successful 1468 */ 1469 static int 1470 lpfc_reset_pci_bus(struct lpfc_hba *phba) 1471 { 1472 struct pci_dev *pdev = phba->pcidev; 1473 struct Scsi_Host *shost = NULL; 1474 struct lpfc_hba *phba_other = NULL; 1475 struct pci_dev *ptr = NULL; 1476 int res; 1477 1478 if (phba->cfg_enable_hba_reset != 2) 1479 return -ENOTSUPP; 1480 1481 if (!pdev) { 1482 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n"); 1483 return -ENODEV; 1484 } 1485 1486 res = lpfc_check_pci_resettable(phba); 1487 if (res) 1488 return res; 1489 1490 /* Walk the list of devices on the pci_dev's bus */ 1491 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 1492 /* Check port is offline */ 1493 shost = pci_get_drvdata(ptr); 1494 if (shost) { 1495 phba_other = 1496 ((struct lpfc_vport *)shost->hostdata)->phba; 1497 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) { 1498 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT, 1499 "8349 WWPN = 0x%02x%02x%02x%02x" 1500 "%02x%02x%02x%02x is not " 1501 "offline!\n", 1502 phba_other->wwpn[0], 1503 phba_other->wwpn[1], 1504 phba_other->wwpn[2], 1505 phba_other->wwpn[3], 1506 phba_other->wwpn[4], 1507 phba_other->wwpn[5], 1508 phba_other->wwpn[6], 1509 phba_other->wwpn[7]); 1510 return -EBUSY; 1511 } 1512 } 1513 } 1514 1515 /* Issue PCI bus reset */ 1516 res = pci_reset_bus(pdev); 1517 if (res) { 1518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1519 "8350 PCI reset bus failed: %d\n", res); 1520 } 1521 1522 return res; 1523 } 1524 1525 /** 1526 * lpfc_selective_reset - Offline then onlines the port 1527 * @phba: lpfc_hba pointer. 1528 * 1529 * Description: 1530 * If the port is configured to allow a reset then the hba is brought 1531 * offline then online. 1532 * 1533 * Notes: 1534 * Assumes any error from lpfc_do_offline() will be negative. 1535 * Do not make this function static. 1536 * 1537 * Returns: 1538 * lpfc_do_offline() return code if not zero 1539 * -EIO reset not configured or error posting the event 1540 * zero for success 1541 **/ 1542 int 1543 lpfc_selective_reset(struct lpfc_hba *phba) 1544 { 1545 struct completion online_compl; 1546 int status = 0; 1547 int rc; 1548 1549 if (!phba->cfg_enable_hba_reset) 1550 return -EACCES; 1551 1552 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { 1553 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1554 1555 if (status != 0) 1556 return status; 1557 } 1558 1559 init_completion(&online_compl); 1560 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1561 LPFC_EVT_ONLINE); 1562 if (rc == 0) 1563 return -ENOMEM; 1564 1565 wait_for_completion(&online_compl); 1566 1567 if (status != 0) 1568 return -EIO; 1569 1570 return 0; 1571 } 1572 1573 /** 1574 * lpfc_issue_reset - Selectively resets an adapter 1575 * @dev: class device that is converted into a Scsi_host. 1576 * @attr: device attribute, not used. 1577 * @buf: containing the string "selective". 1578 * @count: unused variable. 1579 * 1580 * Description: 1581 * If the buf contains the string "selective" then lpfc_selective_reset() 1582 * is called to perform the reset. 1583 * 1584 * Notes: 1585 * Assumes any error from lpfc_selective_reset() will be negative. 
 * If lpfc_selective_reset() returns zero then the length of the buffer
 * is returned, which indicates success.
 *
 * Returns:
 * -EINVAL if the buffer does not contain the string "selective"
 * length of buf if lpfc_selective_reset() succeeds
 * return value of lpfc_selective_reset() if the call fails
 **/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int status = -EINVAL;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
		status = phba->lpfc_selective_reset(phba);

	if (status == 0)
		return strlen(buf);
	else
		return status;
}

/**
 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Wait on the SLI4 interface type-2 device's sliport status register for
 * readiness after performing a firmware reset.
 *
 * Returns:
 * zero for success, -EPERM when port does not have privilege to perform the
 * reset, -EIO when port times out while recovering from the reset.
 *
 * Note:
 * As the caller will interpret the return code by value, be careful when
 * making changes or additions to the return codes.
 **/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
{
	struct lpfc_register portstat_reg = {0};
	int i;

	msleep(100);
	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
		       &portstat_reg.word0))
		return -EIO;

	/* verify if privileged for the request operation */
	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
		return -EPERM;

	/* wait for the SLI port firmware ready after firmware reset */
	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
		msleep(10);
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0))
			continue;
		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
			continue;
		break;
	}

	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
		return 0;
	else
		return -EIO;
}

/**
 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
 * @phba: lpfc_hba pointer.
 * @opcode: The sli4 config command opcode.
 *
 * Description:
 * Request SLI4 interface type-2 device to perform a physical register set
 * access.
1676 * 1677 * Returns: 1678 * zero for success 1679 **/ 1680 static ssize_t 1681 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) 1682 { 1683 struct completion online_compl; 1684 struct pci_dev *pdev = phba->pcidev; 1685 uint32_t before_fc_flag; 1686 uint32_t sriov_nr_virtfn; 1687 uint32_t reg_val; 1688 int status = 0, rc = 0; 1689 int job_posted = 1, sriov_err; 1690 1691 if (!phba->cfg_enable_hba_reset) 1692 return -EACCES; 1693 1694 if ((phba->sli_rev < LPFC_SLI_REV4) || 1695 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 1696 LPFC_SLI_INTF_IF_TYPE_2)) 1697 return -EPERM; 1698 1699 /* Keep state if we need to restore back */ 1700 before_fc_flag = phba->pport->fc_flag; 1701 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; 1702 1703 if (opcode == LPFC_FW_DUMP) { 1704 init_completion(&online_compl); 1705 phba->fw_dump_cmpl = &online_compl; 1706 } else { 1707 /* Disable SR-IOV virtual functions if enabled */ 1708 if (phba->cfg_sriov_nr_virtfn) { 1709 pci_disable_sriov(pdev); 1710 phba->cfg_sriov_nr_virtfn = 0; 1711 } 1712 1713 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1714 1715 if (status != 0) 1716 return status; 1717 1718 /* wait for the device to be quiesced before firmware reset */ 1719 msleep(100); 1720 } 1721 1722 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + 1723 LPFC_CTL_PDEV_CTL_OFFSET); 1724 1725 if (opcode == LPFC_FW_DUMP) 1726 reg_val |= LPFC_FW_DUMP_REQUEST; 1727 else if (opcode == LPFC_FW_RESET) 1728 reg_val |= LPFC_CTL_PDEV_CTL_FRST; 1729 else if (opcode == LPFC_DV_RESET) 1730 reg_val |= LPFC_CTL_PDEV_CTL_DRST; 1731 1732 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p + 1733 LPFC_CTL_PDEV_CTL_OFFSET); 1734 /* flush */ 1735 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 1736 1737 /* delay driver action following IF_TYPE_2 reset */ 1738 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1739 1740 if (rc == -EPERM) { 1741 /* no privilege for reset */ 1742 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1743 "3150 No privilege to perform the requested " 1744 "access: x%x\n", reg_val); 1745 } else if (rc == -EIO) { 1746 /* reset failed, there is nothing more we can do */ 1747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1748 "3153 Fail to perform the requested " 1749 "access: x%x\n", reg_val); 1750 if (phba->fw_dump_cmpl) 1751 phba->fw_dump_cmpl = NULL; 1752 return rc; 1753 } 1754 1755 /* keep the original port state */ 1756 if (before_fc_flag & FC_OFFLINE_MODE) { 1757 if (phba->fw_dump_cmpl) 1758 phba->fw_dump_cmpl = NULL; 1759 goto out; 1760 } 1761 1762 /* Firmware dump will trigger an HA_ERATT event, and 1763 * lpfc_handle_eratt_s4 routine already handles bringing the port back 1764 * online. 
1765 */ 1766 if (opcode == LPFC_FW_DUMP) { 1767 wait_for_completion(phba->fw_dump_cmpl); 1768 } else { 1769 init_completion(&online_compl); 1770 job_posted = lpfc_workq_post_event(phba, &status, &online_compl, 1771 LPFC_EVT_ONLINE); 1772 if (!job_posted) 1773 goto out; 1774 1775 wait_for_completion(&online_compl); 1776 } 1777 out: 1778 /* in any case, restore the virtual functions enabled as before */ 1779 if (sriov_nr_virtfn) { 1780 /* If fw_dump was performed, first disable to clean up */ 1781 if (opcode == LPFC_FW_DUMP) { 1782 pci_disable_sriov(pdev); 1783 phba->cfg_sriov_nr_virtfn = 0; 1784 } 1785 1786 sriov_err = 1787 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); 1788 if (!sriov_err) 1789 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn; 1790 } 1791 1792 /* return proper error code */ 1793 if (!rc) { 1794 if (!job_posted) 1795 rc = -ENOMEM; 1796 else if (status) 1797 rc = -EIO; 1798 } 1799 return rc; 1800 } 1801 1802 /** 1803 * lpfc_nport_evt_cnt_show - Return the number of nport events 1804 * @dev: class device that is converted into a Scsi_host. 1805 * @attr: device attribute, not used. 1806 * @buf: on return contains the ascii number of nport events. 1807 * 1808 * Returns: size of formatted string. 1809 **/ 1810 static ssize_t 1811 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, 1812 char *buf) 1813 { 1814 struct Scsi_Host *shost = class_to_shost(dev); 1815 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1816 struct lpfc_hba *phba = vport->phba; 1817 1818 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 1819 } 1820 1821 static int 1822 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) 1823 { 1824 LPFC_MBOXQ_t *mbox = NULL; 1825 unsigned long val = 0; 1826 char *pval = NULL; 1827 int rc = 0; 1828 1829 if (!strncmp("enable", buff_out, 1830 strlen("enable"))) { 1831 pval = buff_out + strlen("enable") + 1; 1832 rc = kstrtoul(pval, 0, &val); 1833 if (rc) 1834 return rc; /* Invalid number */ 1835 } else if (!strncmp("disable", buff_out, 1836 strlen("disable"))) { 1837 val = 0; 1838 } else { 1839 return -EINVAL; /* Invalid command */ 1840 } 1841 1842 switch (val) { 1843 case 0: 1844 val = 0x0; /* Disable */ 1845 break; 1846 case 2: 1847 val = 0x1; /* Enable two port trunk */ 1848 break; 1849 case 4: 1850 val = 0x2; /* Enable four port trunk */ 1851 break; 1852 default: 1853 return -EINVAL; 1854 } 1855 1856 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1857 "0070 Set trunk mode with val %ld ", val); 1858 1859 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1860 if (!mbox) 1861 return -ENOMEM; 1862 1863 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 1864 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, 1865 12, LPFC_SLI4_MBX_EMBED); 1866 1867 bf_set(lpfc_mbx_set_trunk_mode, 1868 &mbox->u.mqe.un.set_trunk_mode, 1869 val); 1870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 1871 if (rc) 1872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1873 "0071 Set trunk mode failed with status: %d", 1874 rc); 1875 mempool_free(mbox, phba->mbox_mem_pool); 1876 1877 return 0; 1878 } 1879 1880 static ssize_t 1881 lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, 1882 char *buf) 1883 { 1884 struct Scsi_Host *shost = class_to_shost(dev); 1885 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 1886 struct lpfc_hba *phba = vport->phba; 1887 int rc; 1888 int len = 0; 1889 struct lpfc_rdp_context *rdp_context; 1890 u16 temperature; 1891 u16 rx_power; 1892 u16 tx_bias; 1893 u16 tx_power; 1894 u16 vcc; 1895 
	char chbuf[128];
	u16 wavelength = 0;
	struct sff_trasnceiver_codes_byte7 *trasn_code_byte7;

	/* Get transceiver information */
	rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL);
	if (!rdp_context) {
		len = scnprintf(buf, PAGE_SIZE - len,
				"SFP info NA: alloc failure\n");
		goto out_free_rdp;
	}

	rc = lpfc_get_sfp_info_wait(phba, rdp_context);
	if (rc) {
		len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n");
		goto out_free_rdp;
	}

	strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16);
	chbuf[16] = 0;

	len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf);
	len += scnprintf(buf + len, PAGE_SIZE - len,
			 "VendorOUI:\t%02x-%02x-%02x\n",
			 (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI],
			 (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1],
			 (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]);
	strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16);
	chbuf[16] = 0;
	len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf);
	strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16);
	chbuf[16] = 0;
	len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf);
	strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4);
	chbuf[4] = 0;
	len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf);
	strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8);
	chbuf[8] = 0;
	len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf);
	len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n",
			 (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]);
	len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n",
			 (uint8_t)rdp_context->page_a0[SSF_EXT_IDENTIFIER]);
	len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n",
			 (uint8_t)rdp_context->page_a0[SSF_CONNECTOR]);
	wavelength = (rdp_context->page_a0[SSF_WAVELENGTH_B1] << 8) |
		      rdp_context->page_a0[SSF_WAVELENGTH_B0];

	len += scnprintf(buf + len, PAGE_SIZE - len, "Wavelength:\t%d nm\n",
			 wavelength);
	trasn_code_byte7 = (struct sff_trasnceiver_codes_byte7 *)
			&rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7];

	len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t");
	if (*(uint8_t *)trasn_code_byte7 == 0) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n");
	} else {
		if (trasn_code_byte7->fc_sp_100MB)
			len += scnprintf(buf + len, PAGE_SIZE - len, "1 ");
		if (trasn_code_byte7->fc_sp_200mb)
			len += scnprintf(buf + len, PAGE_SIZE - len, "2 ");
		if (trasn_code_byte7->fc_sp_400MB)
			len += scnprintf(buf + len, PAGE_SIZE - len, "4 ");
		if (trasn_code_byte7->fc_sp_800MB)
			len += scnprintf(buf + len, PAGE_SIZE - len, "8 ");
		if (trasn_code_byte7->fc_sp_1600MB)
			len += scnprintf(buf + len, PAGE_SIZE - len, "16 ");
		if (trasn_code_byte7->fc_sp_3200MB)
			len += scnprintf(buf + len, PAGE_SIZE - len, "32 ");
		if (trasn_code_byte7->speed_chk_ecc)
			len += scnprintf(buf + len, PAGE_SIZE - len, "64 ");
		len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
	}
	temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 |
		       rdp_context->page_a2[SFF_TEMPERATURE_B0]);
	vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 |
	       rdp_context->page_a2[SFF_VCC_B0]);
	tx_power = (rdp_context->page_a2[SFF_TXPOWER_B1] << 8 |
		    rdp_context->page_a2[SFF_TXPOWER_B0]);
	tx_bias = (rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
		   rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B0]);
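	/*
	 * Note: the page A2 diagnostic fields gathered here are two-byte
	 * values combined MSB-first and emitted below as raw 16-bit words in
	 * hex ("x%04x") with nominal unit labels; no scaling is applied in
	 * this routine.  Converting to real-world units (for example 1/256
	 * degC per LSB for temperature under the usual SFF-8472 encoding) is
	 * left to the consumer - that encoding is an assumption about the
	 * transceiver, not something verified here.
	 */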
1979 rx_power = (rdp_context->page_a2[SFF_RXPOWER_B1] << 8 | 1980 rdp_context->page_a2[SFF_RXPOWER_B0]); 1981 1982 len += scnprintf(buf + len, PAGE_SIZE - len, 1983 "Temperature:\tx%04x C\n", temperature); 1984 len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc); 1985 len += scnprintf(buf + len, PAGE_SIZE - len, 1986 "TxBiasCurrent:\tx%04x mA\n", tx_bias); 1987 len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n", 1988 tx_power); 1989 len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n", 1990 rx_power); 1991 out_free_rdp: 1992 kfree(rdp_context); 1993 return len; 1994 } 1995 1996 /** 1997 * lpfc_board_mode_show - Return the state of the board 1998 * @dev: class device that is converted into a Scsi_host. 1999 * @attr: device attribute, not used. 2000 * @buf: on return contains the state of the adapter. 2001 * 2002 * Returns: size of formatted string. 2003 **/ 2004 static ssize_t 2005 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, 2006 char *buf) 2007 { 2008 struct Scsi_Host *shost = class_to_shost(dev); 2009 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2010 struct lpfc_hba *phba = vport->phba; 2011 char * state; 2012 2013 if (phba->link_state == LPFC_HBA_ERROR) 2014 state = "error"; 2015 else if (phba->link_state == LPFC_WARM_START) 2016 state = "warm start"; 2017 else if (phba->link_state == LPFC_INIT_START) 2018 state = "offline"; 2019 else 2020 state = "online"; 2021 2022 return scnprintf(buf, PAGE_SIZE, "%s\n", state); 2023 } 2024 2025 /** 2026 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state 2027 * @dev: class device that is converted into a Scsi_host. 2028 * @attr: device attribute, not used. 2029 * @buf: containing one of the strings "online", "offline", "warm" or "error". 2030 * @count: unused variable. 
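 *
 * Description:
 * A minimal usage sketch through the board_mode sysfs attribute defined
 * later in this file (the host number is illustrative only):
 *   echo offline > /sys/class/scsi_host/host0/board_mode
 *   echo online  > /sys/class/scsi_host/host0/board_mode
 *   echo dump    > /sys/class/scsi_host/host0/board_mode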
 *
 * Returns:
 * -EACCES if enable hba reset not enabled
 * -EINVAL if the buffer does not contain a valid string (see above)
 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
 * buf length greater than zero indicates success
 **/
static ssize_t
lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct completion online_compl;
	char *board_mode_str = NULL;
	int status = 0;
	int rc;

	if (!phba->cfg_enable_hba_reset) {
		status = -EACCES;
		goto board_mode_out;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "3050 lpfc_board_mode set to %s\n", buf);

	init_completion(&online_compl);

	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
		rc = lpfc_workq_post_event(phba, &status, &online_compl,
					   LPFC_EVT_ONLINE);
		if (rc == 0) {
			status = -ENOMEM;
			goto board_mode_out;
		}
		wait_for_completion(&online_compl);
		if (status)
			status = -EIO;
	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
		if (phba->sli_rev == LPFC_SLI_REV4)
			status = -EINVAL;
		else
			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
		if (phba->sli_rev == LPFC_SLI_REV4)
			status = -EINVAL;
		else
			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
	else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
		 == 0)
		status = lpfc_reset_pci_bus(phba);
	else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
		lpfc_issue_hb_tmo(phba);
	else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
		status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
	else
		status = -EINVAL;

board_mode_out:
	if (!status)
		return strlen(buf);
	else {
		board_mode_str = strchr(buf, '\n');
		if (board_mode_str)
			*board_mode_str = '\0';
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "3097 Failed \"%s\", status(%d), "
				 "fc_flag(x%x)\n",
				 buf, status, phba->pport->fc_flag);
		return status;
	}
}

/**
 * lpfc_get_hba_info - Return various bits of information about the adapter
 * @phba: pointer to the adapter structure.
 * @mxri: max xri count.
 * @axri: available xri count.
 * @mrpi: max rpi count.
 * @arpi: available rpi count.
 * @mvpi: max vpi count.
 * @avpi: available vpi count.
 *
 * Description:
 * If an integer pointer for a count is not null then the value for the
 * count is returned.
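 * For instance, lpfc_max_rpi_show() below requests only the max rpi count
 * and passes NULL for every other pointer:
 *   lpfc_get_hba_info(phba, NULL, NULL, &mrpi, NULL, NULL, NULL);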
2126 * 2127 * Returns: 2128 * zero on error 2129 * one for success 2130 **/ 2131 static int 2132 lpfc_get_hba_info(struct lpfc_hba *phba, 2133 uint32_t *mxri, uint32_t *axri, 2134 uint32_t *mrpi, uint32_t *arpi, 2135 uint32_t *mvpi, uint32_t *avpi) 2136 { 2137 struct lpfc_mbx_read_config *rd_config; 2138 LPFC_MBOXQ_t *pmboxq; 2139 MAILBOX_t *pmb; 2140 int rc = 0; 2141 uint32_t max_vpi; 2142 2143 /* 2144 * prevent udev from issuing mailbox commands until the port is 2145 * configured. 2146 */ 2147 if (phba->link_state < LPFC_LINK_DOWN || 2148 !phba->mbox_mem_pool || 2149 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 2150 return 0; 2151 2152 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 2153 return 0; 2154 2155 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2156 if (!pmboxq) 2157 return 0; 2158 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 2159 2160 pmb = &pmboxq->u.mb; 2161 pmb->mbxCommand = MBX_READ_CONFIG; 2162 pmb->mbxOwner = OWN_HOST; 2163 pmboxq->ctx_buf = NULL; 2164 2165 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2166 rc = MBX_NOT_FINISHED; 2167 else 2168 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2169 2170 if (rc != MBX_SUCCESS) { 2171 if (rc != MBX_TIMEOUT) 2172 mempool_free(pmboxq, phba->mbox_mem_pool); 2173 return 0; 2174 } 2175 2176 if (phba->sli_rev == LPFC_SLI_REV4) { 2177 rd_config = &pmboxq->u.mqe.un.rd_config; 2178 if (mrpi) 2179 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 2180 if (arpi) 2181 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - 2182 phba->sli4_hba.max_cfg_param.rpi_used; 2183 if (mxri) 2184 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 2185 if (axri) 2186 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - 2187 phba->sli4_hba.max_cfg_param.xri_used; 2188 2189 /* Account for differences with SLI-3. Get vpi count from 2190 * mailbox data and subtract one for max vpi value. 2191 */ 2192 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 2193 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; 2194 2195 /* Limit the max we support */ 2196 if (max_vpi > LPFC_MAX_VPI) 2197 max_vpi = LPFC_MAX_VPI; 2198 if (mvpi) 2199 *mvpi = max_vpi; 2200 if (avpi) 2201 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used; 2202 } else { 2203 if (mrpi) 2204 *mrpi = pmb->un.varRdConfig.max_rpi; 2205 if (arpi) 2206 *arpi = pmb->un.varRdConfig.avail_rpi; 2207 if (mxri) 2208 *mxri = pmb->un.varRdConfig.max_xri; 2209 if (axri) 2210 *axri = pmb->un.varRdConfig.avail_xri; 2211 if (mvpi) 2212 *mvpi = pmb->un.varRdConfig.max_vpi; 2213 if (avpi) { 2214 /* avail_vpi is only valid if link is up and ready */ 2215 if (phba->link_state == LPFC_HBA_READY) 2216 *avpi = pmb->un.varRdConfig.avail_vpi; 2217 else 2218 *avpi = pmb->un.varRdConfig.max_vpi; 2219 } 2220 } 2221 2222 mempool_free(pmboxq, phba->mbox_mem_pool); 2223 return 1; 2224 } 2225 2226 /** 2227 * lpfc_max_rpi_show - Return maximum rpi 2228 * @dev: class device that is converted into a Scsi_host. 2229 * @attr: device attribute, not used. 2230 * @buf: on return contains the maximum rpi count in decimal or "Unknown". 2231 * 2232 * Description: 2233 * Calls lpfc_get_hba_info() asking for just the mrpi count. 2234 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2235 * to "Unknown" and the buffer length is returned, therefore the caller 2236 * must check for "Unknown" in the buffer to detect a failure. 2237 * 2238 * Returns: size of formatted string. 
 **/
static ssize_t
lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt;

	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the used rpi count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt, acnt;

	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_max_xri_show - Return maximum xri
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the maximum xri count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mxri count.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt;

	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_used_xri_show - Return maximum xri minus the available xri
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the used xri count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
2323 **/ 2324 static ssize_t 2325 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, 2326 char *buf) 2327 { 2328 struct Scsi_Host *shost = class_to_shost(dev); 2329 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2330 struct lpfc_hba *phba = vport->phba; 2331 uint32_t cnt, acnt; 2332 2333 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) 2334 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2335 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2336 } 2337 2338 /** 2339 * lpfc_max_vpi_show - Return maximum vpi 2340 * @dev: class device that is converted into a Scsi_host. 2341 * @attr: device attribute, not used. 2342 * @buf: on return contains the maximum vpi count in decimal or "Unknown". 2343 * 2344 * Description: 2345 * Calls lpfc_get_hba_info() asking for just the mvpi count. 2346 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2347 * to "Unknown" and the buffer length is returned, therefore the caller 2348 * must check for "Unknown" in the buffer to detect a failure. 2349 * 2350 * Returns: size of formatted string. 2351 **/ 2352 static ssize_t 2353 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, 2354 char *buf) 2355 { 2356 struct Scsi_Host *shost = class_to_shost(dev); 2357 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2358 struct lpfc_hba *phba = vport->phba; 2359 uint32_t cnt; 2360 2361 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) 2362 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); 2363 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2364 } 2365 2366 /** 2367 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi 2368 * @dev: class device that is converted into a Scsi_host. 2369 * @attr: device attribute, not used. 2370 * @buf: on return contains the used vpi count in decimal or "Unknown". 2371 * 2372 * Description: 2373 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. 2374 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2375 * to "Unknown" and the buffer length is returned, therefore the caller 2376 * must check for "Unknown" in the buffer to detect a failure. 2377 * 2378 * Returns: size of formatted string. 2379 **/ 2380 static ssize_t 2381 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, 2382 char *buf) 2383 { 2384 struct Scsi_Host *shost = class_to_shost(dev); 2385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2386 struct lpfc_hba *phba = vport->phba; 2387 uint32_t cnt, acnt; 2388 2389 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) 2390 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2391 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2392 } 2393 2394 /** 2395 * lpfc_npiv_info_show - Return text about NPIV support for the adapter 2396 * @dev: class device that is converted into a Scsi_host. 2397 * @attr: device attribute, not used. 2398 * @buf: text that must be interpreted to determine if npiv is supported. 2399 * 2400 * Description: 2401 * Buffer will contain text indicating npiv is not suppoerted on the port, 2402 * the port is an NPIV physical port, or it is an npiv virtual port with 2403 * the id of the vport. 2404 * 2405 * Returns: size of formatted string. 
2406 **/ 2407 static ssize_t 2408 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, 2409 char *buf) 2410 { 2411 struct Scsi_Host *shost = class_to_shost(dev); 2412 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2413 struct lpfc_hba *phba = vport->phba; 2414 2415 if (!(phba->max_vpi)) 2416 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); 2417 if (vport->port_type == LPFC_PHYSICAL_PORT) 2418 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); 2419 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); 2420 } 2421 2422 /** 2423 * lpfc_poll_show - Return text about poll support for the adapter 2424 * @dev: class device that is converted into a Scsi_host. 2425 * @attr: device attribute, not used. 2426 * @buf: on return contains the cfg_poll in hex. 2427 * 2428 * Notes: 2429 * cfg_poll should be a lpfc_polling_flags type. 2430 * 2431 * Returns: size of formatted string. 2432 **/ 2433 static ssize_t 2434 lpfc_poll_show(struct device *dev, struct device_attribute *attr, 2435 char *buf) 2436 { 2437 struct Scsi_Host *shost = class_to_shost(dev); 2438 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2439 struct lpfc_hba *phba = vport->phba; 2440 2441 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 2442 } 2443 2444 /** 2445 * lpfc_poll_store - Set the value of cfg_poll for the adapter 2446 * @dev: class device that is converted into a Scsi_host. 2447 * @attr: device attribute, not used. 2448 * @buf: one or more lpfc_polling_flags values. 2449 * @count: not used. 2450 * 2451 * Notes: 2452 * buf contents converted to integer and checked for a valid value. 2453 * 2454 * Returns: 2455 * -EINVAL if the buffer connot be converted or is out of range 2456 * length of the buf on success 2457 **/ 2458 static ssize_t 2459 lpfc_poll_store(struct device *dev, struct device_attribute *attr, 2460 const char *buf, size_t count) 2461 { 2462 struct Scsi_Host *shost = class_to_shost(dev); 2463 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2464 struct lpfc_hba *phba = vport->phba; 2465 uint32_t creg_val; 2466 uint32_t old_val; 2467 int val=0; 2468 2469 if (!isdigit(buf[0])) 2470 return -EINVAL; 2471 2472 if (sscanf(buf, "%i", &val) != 1) 2473 return -EINVAL; 2474 2475 if ((val & 0x3) != val) 2476 return -EINVAL; 2477 2478 if (phba->sli_rev == LPFC_SLI_REV4) 2479 val = 0; 2480 2481 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2482 "3051 lpfc_poll changed from %d to %d\n", 2483 phba->cfg_poll, val); 2484 2485 spin_lock_irq(&phba->hbalock); 2486 2487 old_val = phba->cfg_poll; 2488 2489 if (val & ENABLE_FCP_RING_POLLING) { 2490 if ((val & DISABLE_FCP_RING_INT) && 2491 !(old_val & DISABLE_FCP_RING_INT)) { 2492 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2493 spin_unlock_irq(&phba->hbalock); 2494 return -EINVAL; 2495 } 2496 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 2497 writel(creg_val, phba->HCregaddr); 2498 readl(phba->HCregaddr); /* flush */ 2499 2500 lpfc_poll_start_timer(phba); 2501 } 2502 } else if (val != 0x0) { 2503 spin_unlock_irq(&phba->hbalock); 2504 return -EINVAL; 2505 } 2506 2507 if (!(val & DISABLE_FCP_RING_INT) && 2508 (old_val & DISABLE_FCP_RING_INT)) 2509 { 2510 spin_unlock_irq(&phba->hbalock); 2511 del_timer(&phba->fcp_poll_timer); 2512 spin_lock_irq(&phba->hbalock); 2513 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2514 spin_unlock_irq(&phba->hbalock); 2515 return -EINVAL; 2516 } 2517 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 2518 writel(creg_val, phba->HCregaddr); 2519 
readl(phba->HCregaddr); /* flush */ 2520 } 2521 2522 phba->cfg_poll = val; 2523 2524 spin_unlock_irq(&phba->hbalock); 2525 2526 return strlen(buf); 2527 } 2528 2529 /** 2530 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions 2531 * @dev: class converted to a Scsi_host structure. 2532 * @attr: device attribute, not used. 2533 * @buf: on return contains the formatted support level. 2534 * 2535 * Description: 2536 * Returns the maximum number of virtual functions a physical function can 2537 * support, 0 will be returned if called on virtual function. 2538 * 2539 * Returns: size of formatted string. 2540 **/ 2541 static ssize_t 2542 lpfc_sriov_hw_max_virtfn_show(struct device *dev, 2543 struct device_attribute *attr, 2544 char *buf) 2545 { 2546 struct Scsi_Host *shost = class_to_shost(dev); 2547 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2548 struct lpfc_hba *phba = vport->phba; 2549 uint16_t max_nr_virtfn; 2550 2551 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); 2552 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); 2553 } 2554 2555 /** 2556 * lpfc_enable_bbcr_set: Sets an attribute value. 2557 * @phba: pointer the the adapter structure. 2558 * @val: integer attribute value. 2559 * 2560 * Description: 2561 * Validates the min and max values then sets the 2562 * adapter config field if in the valid range. prints error message 2563 * and does not set the parameter if invalid. 2564 * 2565 * Returns: 2566 * zero on success 2567 * -EINVAL if val is invalid 2568 */ 2569 static ssize_t 2570 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) 2571 { 2572 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { 2573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2574 "3068 lpfc_enable_bbcr changed from %d to " 2575 "%d\n", phba->cfg_enable_bbcr, val); 2576 phba->cfg_enable_bbcr = val; 2577 return 0; 2578 } 2579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2580 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, " 2581 "1\n", val); 2582 return -EINVAL; 2583 } 2584 2585 /* 2586 * lpfc_param_show - Return a cfg attribute value in decimal 2587 * 2588 * Description: 2589 * Macro that given an attr e.g. hba_queue_depth expands 2590 * into a function with the name lpfc_hba_queue_depth_show. 2591 * 2592 * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. 2593 * @dev: class device that is converted into a Scsi_host. 2594 * @attr: device attribute, not used. 2595 * @buf: on return contains the attribute value in decimal. 2596 * 2597 * Returns: size of formatted string. 2598 **/ 2599 #define lpfc_param_show(attr) \ 2600 static ssize_t \ 2601 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2602 char *buf) \ 2603 { \ 2604 struct Scsi_Host *shost = class_to_shost(dev);\ 2605 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2606 struct lpfc_hba *phba = vport->phba;\ 2607 return scnprintf(buf, PAGE_SIZE, "%d\n",\ 2608 phba->cfg_##attr);\ 2609 } 2610 2611 /* 2612 * lpfc_param_hex_show - Return a cfg attribute value in hex 2613 * 2614 * Description: 2615 * Macro that given an attr e.g. hba_queue_depth expands 2616 * into a function with the name lpfc_hba_queue_depth_show 2617 * 2618 * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. 2619 * @dev: class device that is converted into a Scsi_host. 2620 * @attr: device attribute, not used. 2621 * @buf: on return contains the attribute value in hexadecimal. 2622 * 2623 * Returns: size of formatted string. 
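 *
 * For illustration, with a placeholder attribute "foo" the macro would
 * expand to roughly (a sketch, not verbatim output):
 *   static ssize_t lpfc_foo_show(struct device *dev,
 *                                struct device_attribute *attr, char *buf)
 *   {
 *           ...
 *           return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_foo);
 *   }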
2624 **/ 2625 #define lpfc_param_hex_show(attr) \ 2626 static ssize_t \ 2627 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2628 char *buf) \ 2629 { \ 2630 struct Scsi_Host *shost = class_to_shost(dev);\ 2631 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2632 struct lpfc_hba *phba = vport->phba;\ 2633 uint val = 0;\ 2634 val = phba->cfg_##attr;\ 2635 return scnprintf(buf, PAGE_SIZE, "%#x\n",\ 2636 phba->cfg_##attr);\ 2637 } 2638 2639 /* 2640 * lpfc_param_init - Initializes a cfg attribute 2641 * 2642 * Description: 2643 * Macro that given an attr e.g. hba_queue_depth expands 2644 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2645 * takes a default argument, a minimum and maximum argument. 2646 * 2647 * lpfc_##attr##_init: Initializes an attribute. 2648 * @phba: pointer the the adapter structure. 2649 * @val: integer attribute value. 2650 * 2651 * Validates the min and max values then sets the adapter config field 2652 * accordingly, or uses the default if out of range and prints an error message. 2653 * 2654 * Returns: 2655 * zero on success 2656 * -EINVAL if default used 2657 **/ 2658 #define lpfc_param_init(attr, default, minval, maxval) \ 2659 static int \ 2660 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ 2661 { \ 2662 if (lpfc_rangecheck(val, minval, maxval)) {\ 2663 phba->cfg_##attr = val;\ 2664 return 0;\ 2665 }\ 2666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2667 "0449 lpfc_"#attr" attribute cannot be set to %d, "\ 2668 "allowed range is ["#minval", "#maxval"]\n", val); \ 2669 phba->cfg_##attr = default;\ 2670 return -EINVAL;\ 2671 } 2672 2673 /* 2674 * lpfc_param_set - Set a cfg attribute value 2675 * 2676 * Description: 2677 * Macro that given an attr e.g. hba_queue_depth expands 2678 * into a function with the name lpfc_hba_queue_depth_set 2679 * 2680 * lpfc_##attr##_set: Sets an attribute value. 2681 * @phba: pointer the the adapter structure. 2682 * @val: integer attribute value. 2683 * 2684 * Description: 2685 * Validates the min and max values then sets the 2686 * adapter config field if in the valid range. prints error message 2687 * and does not set the parameter if invalid. 2688 * 2689 * Returns: 2690 * zero on success 2691 * -EINVAL if val is invalid 2692 **/ 2693 #define lpfc_param_set(attr, default, minval, maxval) \ 2694 static int \ 2695 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 2696 { \ 2697 if (lpfc_rangecheck(val, minval, maxval)) {\ 2698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2699 "3052 lpfc_" #attr " changed from %d to %d\n", \ 2700 phba->cfg_##attr, val); \ 2701 phba->cfg_##attr = val;\ 2702 return 0;\ 2703 }\ 2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2705 "0450 lpfc_"#attr" attribute cannot be set to %d, "\ 2706 "allowed range is ["#minval", "#maxval"]\n", val); \ 2707 return -EINVAL;\ 2708 } 2709 2710 /* 2711 * lpfc_param_store - Set a vport attribute value 2712 * 2713 * Description: 2714 * Macro that given an attr e.g. hba_queue_depth expands 2715 * into a function with the name lpfc_hba_queue_depth_store. 2716 * 2717 * lpfc_##attr##_store: Set an sttribute value. 2718 * @dev: class device that is converted into a Scsi_host. 2719 * @attr: device attribute, not used. 2720 * @buf: contains the attribute value in ascii. 2721 * @count: not used. 2722 * 2723 * Description: 2724 * Convert the ascii text number to an integer, then 2725 * use the lpfc_##attr##_set function to set the value. 
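 * As a sketch (attribute name and value are illustrative only), a store
 * routine generated by this macro is reached through its sysfs file:
 *   echo 32 > /sys/class/scsi_host/host0/lpfc_hba_queue_depth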
2726 * 2727 * Returns: 2728 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2729 * length of buffer upon success. 2730 **/ 2731 #define lpfc_param_store(attr) \ 2732 static ssize_t \ 2733 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2734 const char *buf, size_t count) \ 2735 { \ 2736 struct Scsi_Host *shost = class_to_shost(dev);\ 2737 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2738 struct lpfc_hba *phba = vport->phba;\ 2739 uint val = 0;\ 2740 if (!isdigit(buf[0]))\ 2741 return -EINVAL;\ 2742 if (sscanf(buf, "%i", &val) != 1)\ 2743 return -EINVAL;\ 2744 if (lpfc_##attr##_set(phba, val) == 0) \ 2745 return strlen(buf);\ 2746 else \ 2747 return -EINVAL;\ 2748 } 2749 2750 /* 2751 * lpfc_vport_param_show - Return decimal formatted cfg attribute value 2752 * 2753 * Description: 2754 * Macro that given an attr e.g. hba_queue_depth expands 2755 * into a function with the name lpfc_hba_queue_depth_show 2756 * 2757 * lpfc_##attr##_show: prints the attribute value in decimal. 2758 * @dev: class device that is converted into a Scsi_host. 2759 * @attr: device attribute, not used. 2760 * @buf: on return contains the attribute value in decimal. 2761 * 2762 * Returns: length of formatted string. 2763 **/ 2764 #define lpfc_vport_param_show(attr) \ 2765 static ssize_t \ 2766 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2767 char *buf) \ 2768 { \ 2769 struct Scsi_Host *shost = class_to_shost(dev);\ 2770 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2771 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 2772 } 2773 2774 /* 2775 * lpfc_vport_param_hex_show - Return hex formatted attribute value 2776 * 2777 * Description: 2778 * Macro that given an attr e.g. 2779 * hba_queue_depth expands into a function with the name 2780 * lpfc_hba_queue_depth_show 2781 * 2782 * lpfc_##attr##_show: prints the attribute value in hexadecimal. 2783 * @dev: class device that is converted into a Scsi_host. 2784 * @attr: device attribute, not used. 2785 * @buf: on return contains the attribute value in hexadecimal. 2786 * 2787 * Returns: length of formatted string. 2788 **/ 2789 #define lpfc_vport_param_hex_show(attr) \ 2790 static ssize_t \ 2791 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2792 char *buf) \ 2793 { \ 2794 struct Scsi_Host *shost = class_to_shost(dev);\ 2795 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2796 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 2797 } 2798 2799 /* 2800 * lpfc_vport_param_init - Initialize a vport cfg attribute 2801 * 2802 * Description: 2803 * Macro that given an attr e.g. hba_queue_depth expands 2804 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2805 * takes a default argument, a minimum and maximum argument. 2806 * 2807 * lpfc_##attr##_init: validates the min and max values then sets the 2808 * adapter config field accordingly, or uses the default if out of range 2809 * and prints an error message. 2810 * @phba: pointer the the adapter structure. 2811 * @val: integer attribute value. 
2812 * 2813 * Returns: 2814 * zero on success 2815 * -EINVAL if default used 2816 **/ 2817 #define lpfc_vport_param_init(attr, default, minval, maxval) \ 2818 static int \ 2819 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ 2820 { \ 2821 if (lpfc_rangecheck(val, minval, maxval)) {\ 2822 vport->cfg_##attr = val;\ 2823 return 0;\ 2824 }\ 2825 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2826 "0423 lpfc_"#attr" attribute cannot be set to %d, "\ 2827 "allowed range is ["#minval", "#maxval"]\n", val); \ 2828 vport->cfg_##attr = default;\ 2829 return -EINVAL;\ 2830 } 2831 2832 /* 2833 * lpfc_vport_param_set - Set a vport cfg attribute 2834 * 2835 * Description: 2836 * Macro that given an attr e.g. hba_queue_depth expands 2837 * into a function with the name lpfc_hba_queue_depth_set 2838 * 2839 * lpfc_##attr##_set: validates the min and max values then sets the 2840 * adapter config field if in the valid range. prints error message 2841 * and does not set the parameter if invalid. 2842 * @phba: pointer the the adapter structure. 2843 * @val: integer attribute value. 2844 * 2845 * Returns: 2846 * zero on success 2847 * -EINVAL if val is invalid 2848 **/ 2849 #define lpfc_vport_param_set(attr, default, minval, maxval) \ 2850 static int \ 2851 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 2852 { \ 2853 if (lpfc_rangecheck(val, minval, maxval)) {\ 2854 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2855 "3053 lpfc_" #attr \ 2856 " changed from %d (x%x) to %d (x%x)\n", \ 2857 vport->cfg_##attr, vport->cfg_##attr, \ 2858 val, val); \ 2859 vport->cfg_##attr = val;\ 2860 return 0;\ 2861 }\ 2862 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2863 "0424 lpfc_"#attr" attribute cannot be set to %d, "\ 2864 "allowed range is ["#minval", "#maxval"]\n", val); \ 2865 return -EINVAL;\ 2866 } 2867 2868 /* 2869 * lpfc_vport_param_store - Set a vport attribute 2870 * 2871 * Description: 2872 * Macro that given an attr e.g. hba_queue_depth 2873 * expands into a function with the name lpfc_hba_queue_depth_store 2874 * 2875 * lpfc_##attr##_store: convert the ascii text number to an integer, then 2876 * use the lpfc_##attr##_set function to set the value. 2877 * @cdev: class device that is converted into a Scsi_host. 2878 * @buf: contains the attribute value in decimal. 2879 * @count: not used. 2880 * 2881 * Returns: 2882 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2883 * length of buffer upon success. 
2884 **/ 2885 #define lpfc_vport_param_store(attr) \ 2886 static ssize_t \ 2887 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2888 const char *buf, size_t count) \ 2889 { \ 2890 struct Scsi_Host *shost = class_to_shost(dev);\ 2891 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2892 uint val = 0;\ 2893 if (!isdigit(buf[0]))\ 2894 return -EINVAL;\ 2895 if (sscanf(buf, "%i", &val) != 1)\ 2896 return -EINVAL;\ 2897 if (lpfc_##attr##_set(vport, val) == 0) \ 2898 return strlen(buf);\ 2899 else \ 2900 return -EINVAL;\ 2901 } 2902 2903 2904 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); 2905 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); 2906 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); 2907 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); 2908 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); 2909 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); 2910 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); 2911 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); 2912 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 2913 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); 2914 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); 2915 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 2916 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 2917 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 2918 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, 2919 lpfc_link_state_store); 2920 static DEVICE_ATTR(option_rom_version, S_IRUGO, 2921 lpfc_option_rom_version_show, NULL); 2922 static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 2923 lpfc_num_discovered_ports_show, NULL); 2924 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 2925 static DEVICE_ATTR_RO(lpfc_drvr_version); 2926 static DEVICE_ATTR_RO(lpfc_enable_fip); 2927 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 2928 lpfc_board_mode_show, lpfc_board_mode_store); 2929 static DEVICE_ATTR_RO(lpfc_xcvr_data); 2930 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 2931 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); 2932 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); 2933 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); 2934 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); 2935 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); 2936 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); 2937 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); 2938 static DEVICE_ATTR_RO(lpfc_temp_sensor); 2939 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); 2940 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2941 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, 2942 NULL); 2943 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); 2944 2945 #define WWN_SZ 8 2946 /** 2947 * lpfc_wwn_set - Convert string to the 8 byte WWN value. 2948 * @buf: WWN string. 2949 * @cnt: Length of string. 2950 * @wwn: Array to receive converted wwn value. 
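 *
 * Description:
 * Accepts 16 hex digits, optionally prefixed with "x" or "0x" and
 * optionally followed by a newline, e.g. (arbitrary example value):
 *   0x10000000c9abcdef   or   10000000c9abcdef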
2951 * 2952 * Returns: 2953 * -EINVAL if the buffer does not contain a valid wwn 2954 * 0 success 2955 **/ 2956 static size_t 2957 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) 2958 { 2959 unsigned int i, j; 2960 2961 /* Count may include a LF at end of string */ 2962 if (buf[cnt-1] == '\n') 2963 cnt--; 2964 2965 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || 2966 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) 2967 return -EINVAL; 2968 2969 memset(wwn, 0, WWN_SZ); 2970 2971 /* Validate and store the new name */ 2972 for (i = 0, j = 0; i < 16; i++) { 2973 if ((*buf >= 'a') && (*buf <= 'f')) 2974 j = ((j << 4) | ((*buf++ - 'a') + 10)); 2975 else if ((*buf >= 'A') && (*buf <= 'F')) 2976 j = ((j << 4) | ((*buf++ - 'A') + 10)); 2977 else if ((*buf >= '0') && (*buf <= '9')) 2978 j = ((j << 4) | (*buf++ - '0')); 2979 else 2980 return -EINVAL; 2981 if (i % 2) { 2982 wwn[i/2] = j & 0xff; 2983 j = 0; 2984 } 2985 } 2986 return 0; 2987 } 2988 2989 2990 /** 2991 * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for 2992 * Optimized Access Storage (OAS) operations. 2993 * @dev: class device that is converted into a Scsi_host. 2994 * @attr: device attribute, not used. 2995 * @buf: buffer for passing information. 2996 * 2997 * Returns: 2998 * value of count 2999 **/ 3000 static ssize_t 3001 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, 3002 char *buf) 3003 { 3004 struct Scsi_Host *shost = class_to_shost(dev); 3005 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3006 3007 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 3008 wwn_to_u64(phba->cfg_oas_tgt_wwpn)); 3009 } 3010 3011 /** 3012 * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for 3013 * Optimized Access Storage (OAS) operations. 3014 * @dev: class device that is converted into a Scsi_host. 3015 * @attr: device attribute, not used. 3016 * @buf: buffer for passing information. 3017 * @count: Size of the data buffer. 3018 * 3019 * Returns: 3020 * -EINVAL count is invalid, invalid wwpn byte invalid 3021 * -EPERM oas is not supported by hba 3022 * value of count on success 3023 **/ 3024 static ssize_t 3025 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, 3026 const char *buf, size_t count) 3027 { 3028 struct Scsi_Host *shost = class_to_shost(dev); 3029 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3030 unsigned int cnt = count; 3031 uint8_t wwpn[WWN_SZ]; 3032 int rc; 3033 3034 if (!phba->cfg_fof) 3035 return -EPERM; 3036 3037 /* count may include a LF at end of string */ 3038 if (buf[cnt-1] == '\n') 3039 cnt--; 3040 3041 rc = lpfc_wwn_set(buf, cnt, wwpn); 3042 if (rc) 3043 return rc; 3044 3045 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3046 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3047 if (wwn_to_u64(wwpn) == 0) 3048 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; 3049 else 3050 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; 3051 phba->cfg_oas_flags &= ~OAS_LUN_VALID; 3052 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; 3053 return count; 3054 } 3055 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, 3056 lpfc_oas_tgt_show, lpfc_oas_tgt_store); 3057 3058 /** 3059 * lpfc_oas_priority_show - Return wwpn of target whose luns maybe enabled for 3060 * Optimized Access Storage (OAS) operations. 3061 * @dev: class device that is converted into a Scsi_host. 3062 * @attr: device attribute, not used. 3063 * @buf: buffer for passing information. 
3064 * 3065 * Returns: 3066 * value of count 3067 **/ 3068 static ssize_t 3069 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, 3070 char *buf) 3071 { 3072 struct Scsi_Host *shost = class_to_shost(dev); 3073 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3074 3075 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); 3076 } 3077 3078 /** 3079 * lpfc_oas_priority_store - Store wwpn of target whose luns maybe enabled for 3080 * Optimized Access Storage (OAS) operations. 3081 * @dev: class device that is converted into a Scsi_host. 3082 * @attr: device attribute, not used. 3083 * @buf: buffer for passing information. 3084 * @count: Size of the data buffer. 3085 * 3086 * Returns: 3087 * -EINVAL count is invalid, invalid wwpn byte invalid 3088 * -EPERM oas is not supported by hba 3089 * value of count on success 3090 **/ 3091 static ssize_t 3092 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, 3093 const char *buf, size_t count) 3094 { 3095 struct Scsi_Host *shost = class_to_shost(dev); 3096 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3097 unsigned int cnt = count; 3098 unsigned long val; 3099 int ret; 3100 3101 if (!phba->cfg_fof) 3102 return -EPERM; 3103 3104 /* count may include a LF at end of string */ 3105 if (buf[cnt-1] == '\n') 3106 cnt--; 3107 3108 ret = kstrtoul(buf, 0, &val); 3109 if (ret || (val > 0x7f)) 3110 return -EINVAL; 3111 3112 if (val) 3113 phba->cfg_oas_priority = (uint8_t)val; 3114 else 3115 phba->cfg_oas_priority = phba->cfg_XLanePriority; 3116 return count; 3117 } 3118 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, 3119 lpfc_oas_priority_show, lpfc_oas_priority_store); 3120 3121 /** 3122 * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled 3123 * for Optimized Access Storage (OAS) operations. 3124 * @dev: class device that is converted into a Scsi_host. 3125 * @attr: device attribute, not used. 3126 * @buf: buffer for passing information. 3127 * 3128 * Returns: 3129 * value of count on success 3130 **/ 3131 static ssize_t 3132 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, 3133 char *buf) 3134 { 3135 struct Scsi_Host *shost = class_to_shost(dev); 3136 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3137 3138 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 3139 wwn_to_u64(phba->cfg_oas_vpt_wwpn)); 3140 } 3141 3142 /** 3143 * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled 3144 * for Optimized Access Storage (OAS) operations. 3145 * @dev: class device that is converted into a Scsi_host. 3146 * @attr: device attribute, not used. 3147 * @buf: buffer for passing information. 3148 * @count: Size of the data buffer. 
3149 * 3150 * Returns: 3151 * -EINVAL count is invalid, invalid wwpn byte invalid 3152 * -EPERM oas is not supported by hba 3153 * value of count on success 3154 **/ 3155 static ssize_t 3156 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, 3157 const char *buf, size_t count) 3158 { 3159 struct Scsi_Host *shost = class_to_shost(dev); 3160 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3161 unsigned int cnt = count; 3162 uint8_t wwpn[WWN_SZ]; 3163 int rc; 3164 3165 if (!phba->cfg_fof) 3166 return -EPERM; 3167 3168 /* count may include a LF at end of string */ 3169 if (buf[cnt-1] == '\n') 3170 cnt--; 3171 3172 rc = lpfc_wwn_set(buf, cnt, wwpn); 3173 if (rc) 3174 return rc; 3175 3176 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3177 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3178 if (wwn_to_u64(wwpn) == 0) 3179 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; 3180 else 3181 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; 3182 phba->cfg_oas_flags &= ~OAS_LUN_VALID; 3183 if (phba->cfg_oas_priority == 0) 3184 phba->cfg_oas_priority = phba->cfg_XLanePriority; 3185 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; 3186 return count; 3187 } 3188 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, 3189 lpfc_oas_vpt_show, lpfc_oas_vpt_store); 3190 3191 /** 3192 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) 3193 * of whether luns will be enabled or disabled 3194 * for Optimized Access Storage (OAS) operations. 3195 * @dev: class device that is converted into a Scsi_host. 3196 * @attr: device attribute, not used. 3197 * @buf: buffer for passing information. 3198 * 3199 * Returns: 3200 * size of formatted string. 3201 **/ 3202 static ssize_t 3203 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, 3204 char *buf) 3205 { 3206 struct Scsi_Host *shost = class_to_shost(dev); 3207 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3208 3209 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); 3210 } 3211 3212 /** 3213 * lpfc_oas_lun_state_store - Store the state (enabled or disabled) 3214 * of whether luns will be enabled or disabled 3215 * for Optimized Access Storage (OAS) operations. 3216 * @dev: class device that is converted into a Scsi_host. 3217 * @attr: device attribute, not used. 3218 * @buf: buffer for passing information. 3219 * @count: Size of the data buffer. 3220 * 3221 * Returns: 3222 * -EINVAL count is invalid, invalid wwpn byte invalid 3223 * -EPERM oas is not supported by hba 3224 * value of count on success 3225 **/ 3226 static ssize_t 3227 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, 3228 const char *buf, size_t count) 3229 { 3230 struct Scsi_Host *shost = class_to_shost(dev); 3231 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3232 int val = 0; 3233 3234 if (!phba->cfg_fof) 3235 return -EPERM; 3236 3237 if (!isdigit(buf[0])) 3238 return -EINVAL; 3239 3240 if (sscanf(buf, "%i", &val) != 1) 3241 return -EINVAL; 3242 3243 if ((val != 0) && (val != 1)) 3244 return -EINVAL; 3245 3246 phba->cfg_oas_lun_state = val; 3247 return strlen(buf); 3248 } 3249 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, 3250 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); 3251 3252 /** 3253 * lpfc_oas_lun_status_show - Return the status of the Optimized Access 3254 * Storage (OAS) lun returned by the 3255 * lpfc_oas_lun_show function. 
3256 * @dev: class device that is converted into a Scsi_host. 3257 * @attr: device attribute, not used. 3258 * @buf: buffer for passing information. 3259 * 3260 * Returns: 3261 * size of formatted string. 3262 **/ 3263 static ssize_t 3264 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, 3265 char *buf) 3266 { 3267 struct Scsi_Host *shost = class_to_shost(dev); 3268 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3269 3270 if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) 3271 return -EFAULT; 3272 3273 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); 3274 } 3275 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, 3276 lpfc_oas_lun_status_show, NULL); 3277 3278 3279 /** 3280 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage 3281 * (OAS) operations. 3282 * @phba: lpfc_hba pointer. 3283 * @vpt_wwpn: wwpn of the vport associated with the returned lun 3284 * @tgt_wwpn: wwpn of the target associated with the returned lun 3285 * @lun: the fc lun for setting oas state. 3286 * @oas_state: the oas state to be set to the lun. 3287 * @pri: priority 3288 * 3289 * Returns: 3290 * SUCCESS : 0 3291 * -EPERM OAS is not enabled or not supported by this port. 3292 * 3293 */ 3294 static size_t 3295 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3296 uint8_t tgt_wwpn[], uint64_t lun, 3297 uint32_t oas_state, uint8_t pri) 3298 { 3299 3300 int rc = 0; 3301 3302 if (!phba->cfg_fof) 3303 return -EPERM; 3304 3305 if (oas_state) { 3306 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3307 (struct lpfc_name *)tgt_wwpn, 3308 lun, pri)) 3309 rc = -ENOMEM; 3310 } else { 3311 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3312 (struct lpfc_name *)tgt_wwpn, lun, pri); 3313 } 3314 return rc; 3315 3316 } 3317 3318 /** 3319 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized 3320 * Access Storage (OAS) operations. 3321 * @phba: lpfc_hba pointer. 3322 * @vpt_wwpn: wwpn of the vport associated with the returned lun 3323 * @tgt_wwpn: wwpn of the target associated with the returned lun 3324 * @lun_status: status of the lun returned lun 3325 * @lun_pri: priority of the lun returned lun 3326 * 3327 * Returns the first or next lun enabled for OAS operations for the vport/target 3328 * specified. If a lun is found, its vport wwpn, target wwpn and status is 3329 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. 3330 * 3331 * Return: 3332 * lun that is OAS enabled for the vport/target 3333 * NOT_OAS_ENABLED_LUN when no oas enabled lun found. 3334 */ 3335 static uint64_t 3336 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3337 uint8_t tgt_wwpn[], uint32_t *lun_status, 3338 uint32_t *lun_pri) 3339 { 3340 uint64_t found_lun; 3341 3342 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) 3343 return NOT_OAS_ENABLED_LUN; 3344 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) 3345 phba->sli4_hba.oas_next_vpt_wwpn, 3346 (struct lpfc_name *) 3347 phba->sli4_hba.oas_next_tgt_wwpn, 3348 &phba->sli4_hba.oas_next_lun, 3349 (struct lpfc_name *)vpt_wwpn, 3350 (struct lpfc_name *)tgt_wwpn, 3351 &found_lun, lun_status, lun_pri)) 3352 return found_lun; 3353 else 3354 return NOT_OAS_ENABLED_LUN; 3355 } 3356 3357 /** 3358 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations 3359 * @phba: lpfc_hba pointer. 3360 * @vpt_wwpn: vport wwpn by reference. 3361 * @tgt_wwpn: target wwpn by reference. 3362 * @lun: the fc lun for setting oas state. 
3363 * @oas_state: the oas state to be set to the oas_lun. 3364 * @pri: priority 3365 * 3366 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) 3367 * a lun for OAS operations. 3368 * 3369 * Return: 3370 * SUCCESS: 0 3371 * -ENOMEM: failed to enable an lun for OAS operations 3372 * -EPERM: OAS is not enabled 3373 */ 3374 static ssize_t 3375 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3376 uint8_t tgt_wwpn[], uint64_t lun, 3377 uint32_t oas_state, uint8_t pri) 3378 { 3379 3380 int rc; 3381 3382 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, 3383 oas_state, pri); 3384 return rc; 3385 } 3386 3387 /** 3388 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target 3389 * @dev: class device that is converted into a Scsi_host. 3390 * @attr: device attribute, not used. 3391 * @buf: buffer for passing information. 3392 * 3393 * This routine returns a lun enabled for OAS each time the function 3394 * is called. 3395 * 3396 * Returns: 3397 * SUCCESS: size of formatted string. 3398 * -EFAULT: target or vport wwpn was not set properly. 3399 * -EPERM: oas is not enabled. 3400 **/ 3401 static ssize_t 3402 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, 3403 char *buf) 3404 { 3405 struct Scsi_Host *shost = class_to_shost(dev); 3406 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3407 3408 uint64_t oas_lun; 3409 int len = 0; 3410 3411 if (!phba->cfg_fof) 3412 return -EPERM; 3413 3414 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3415 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) 3416 return -EFAULT; 3417 3418 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3419 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) 3420 return -EFAULT; 3421 3422 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, 3423 phba->cfg_oas_tgt_wwpn, 3424 &phba->cfg_oas_lun_status, 3425 &phba->cfg_oas_priority); 3426 if (oas_lun != NOT_OAS_ENABLED_LUN) 3427 phba->cfg_oas_flags |= OAS_LUN_VALID; 3428 3429 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); 3430 3431 return len; 3432 } 3433 3434 /** 3435 * lpfc_oas_lun_store - Sets the OAS state for lun 3436 * @dev: class device that is converted into a Scsi_host. 3437 * @attr: device attribute, not used. 3438 * @buf: buffer for passing information. 3439 * @count: size of the formatting string 3440 * 3441 * This function sets the OAS state for lun. Before this function is called, 3442 * the vport wwpn, target wwpn, and oas state need to be set. 3443 * 3444 * Returns: 3445 * SUCCESS: size of formatted string. 3446 * -EFAULT: target or vport wwpn was not set properly. 3447 * -EPERM: oas is not enabled. 3448 * size of formatted string. 
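 *
 * A hypothetical sequence using the lpfc_xlane_* attributes defined in this
 * file (wwpns, lun and host number are placeholders):
 *   echo 0x<vport wwpn>  > /sys/class/scsi_host/host0/lpfc_xlane_vpt
 *   echo 0x<target wwpn> > /sys/class/scsi_host/host0/lpfc_xlane_tgt
 *   echo 1               > /sys/class/scsi_host/host0/lpfc_xlane_lun_state
 *   echo 0x0             > /sys/class/scsi_host/host0/lpfc_xlane_lun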
3449 **/ 3450 static ssize_t 3451 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, 3452 const char *buf, size_t count) 3453 { 3454 struct Scsi_Host *shost = class_to_shost(dev); 3455 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3456 uint64_t scsi_lun; 3457 uint32_t pri; 3458 ssize_t rc; 3459 3460 if (!phba->cfg_fof) 3461 return -EPERM; 3462 3463 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3464 return -EFAULT; 3465 3466 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3467 return -EFAULT; 3468 3469 if (!isdigit(buf[0])) 3470 return -EINVAL; 3471 3472 if (sscanf(buf, "0x%llx", &scsi_lun) != 1) 3473 return -EINVAL; 3474 3475 pri = phba->cfg_oas_priority; 3476 if (pri == 0) 3477 pri = phba->cfg_XLanePriority; 3478 3479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3480 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " 3481 "priority 0x%x with oas state %d\n", 3482 wwn_to_u64(phba->cfg_oas_vpt_wwpn), 3483 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, 3484 pri, phba->cfg_oas_lun_state); 3485 3486 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, 3487 phba->cfg_oas_tgt_wwpn, scsi_lun, 3488 phba->cfg_oas_lun_state, pri); 3489 if (rc) 3490 return rc; 3491 3492 return count; 3493 } 3494 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, 3495 lpfc_oas_lun_show, lpfc_oas_lun_store); 3496 3497 int lpfc_enable_nvmet_cnt; 3498 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { 3499 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3500 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3501 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); 3502 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); 3503 3504 static int lpfc_poll = 0; 3505 module_param(lpfc_poll, int, S_IRUGO); 3506 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" 3507 " 0 - none," 3508 " 1 - poll with interrupts enabled" 3509 " 3 - poll and disable FCP ring interrupts"); 3510 3511 static DEVICE_ATTR_RW(lpfc_poll); 3512 3513 int lpfc_no_hba_reset_cnt; 3514 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { 3515 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3516 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); 3517 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); 3518 3519 LPFC_ATTR(sli_mode, 3, 3, 3, 3520 "SLI mode selector: 3 - select SLI-3"); 3521 3522 LPFC_ATTR_R(enable_npiv, 1, 0, 1, 3523 "Enable NPIV functionality"); 3524 3525 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, 3526 "FCF Fast failover=1 Priority failover=2"); 3527 3528 /* 3529 * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of 3530 * aborted IO. 3531 * The range is [0,1]. Default value is 0 3532 * 0, IO completes after ABTS issued (default). 3533 * 1, IO completes after receipt of ABTS response or timeout. 3534 */ 3535 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion"); 3536 3537 /* 3538 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures 3539 # 0x0 = disabled, XRI/OXID use not tracked. 3540 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. 3541 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. 3542 */ 3543 LPFC_ATTR_R(enable_rrq, 2, 0, 2, 3544 "Enable RRQ functionality"); 3545 3546 /* 3547 # lpfc_suppress_link_up: Bring link up at initialization 3548 # 0x0 = bring link up (issue MBX_INIT_LINK) 3549 # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) 3550 # 0x2 = never bring up link 3551 # Default value is 0. 
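# Example (illustrative only): loading the driver with
#   modprobe lpfc lpfc_suppress_link_up=1
# holds off the MBX_INIT_LINK mailbox command at initialization.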
3552 */ 3553 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 3554 LPFC_DELAY_INIT_LINK_INDEFINITELY, 3555 "Suppress Link Up at initialization"); 3556 3557 static ssize_t 3558 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) 3559 { 3560 struct Scsi_Host *shost = class_to_shost(dev); 3561 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3562 3563 return scnprintf(buf, PAGE_SIZE, "%d\n", 3564 phba->sli4_hba.pc_sli4_params.pls); 3565 } 3566 static DEVICE_ATTR(pls, 0444, 3567 lpfc_pls_show, NULL); 3568 3569 static ssize_t 3570 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) 3571 { 3572 struct Scsi_Host *shost = class_to_shost(dev); 3573 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3574 3575 return scnprintf(buf, PAGE_SIZE, "%d\n", 3576 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); 3577 } 3578 static DEVICE_ATTR(pt, 0444, 3579 lpfc_pt_show, NULL); 3580 3581 /* 3582 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS 3583 # 1 - (1024) 3584 # 2 - (2048) 3585 # 3 - (3072) 3586 # 4 - (4096) 3587 # 5 - (5120) 3588 */ 3589 static ssize_t 3590 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3591 { 3592 struct Scsi_Host *shost = class_to_shost(dev); 3593 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3594 3595 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); 3596 } 3597 3598 static DEVICE_ATTR(iocb_hw, S_IRUGO, 3599 lpfc_iocb_hw_show, NULL); 3600 static ssize_t 3601 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3602 { 3603 struct Scsi_Host *shost = class_to_shost(dev); 3604 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3605 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3606 3607 return scnprintf(buf, PAGE_SIZE, "%d\n", 3608 pring ? pring->txq_max : 0); 3609 } 3610 3611 static DEVICE_ATTR(txq_hw, S_IRUGO, 3612 lpfc_txq_hw_show, NULL); 3613 static ssize_t 3614 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, 3615 char *buf) 3616 { 3617 struct Scsi_Host *shost = class_to_shost(dev); 3618 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3619 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3620 3621 return scnprintf(buf, PAGE_SIZE, "%d\n", 3622 pring ? pring->txcmplq_max : 0); 3623 } 3624 3625 static DEVICE_ATTR(txcmplq_hw, S_IRUGO, 3626 lpfc_txcmplq_hw_show, NULL); 3627 3628 /* 3629 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 3630 # until the timer expires. Value range is [0,255]. Default value is 30. 3631 */ 3632 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3633 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; 3634 module_param(lpfc_nodev_tmo, int, 0); 3635 MODULE_PARM_DESC(lpfc_nodev_tmo, 3636 "Seconds driver will hold I/O waiting " 3637 "for a device to come back"); 3638 3639 /** 3640 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value 3641 * @dev: class converted to a Scsi_host structure. 3642 * @attr: device attribute, not used. 3643 * @buf: on return contains the dev loss timeout in decimal. 3644 * 3645 * Returns: size of formatted string. 
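 *
 * Illustrative read (not part of the original source; host number assumed):
 *   cat /sys/class/scsi_host/host0/lpfc_nodev_tmo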
3646 **/ 3647 static ssize_t 3648 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, 3649 char *buf) 3650 { 3651 struct Scsi_Host *shost = class_to_shost(dev); 3652 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3653 3654 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 3655 } 3656 3657 /** 3658 * lpfc_nodev_tmo_init - Set the hba nodev timeout value 3659 * @vport: lpfc vport structure pointer. 3660 * @val: contains the nodev timeout value. 3661 * 3662 * Description: 3663 * If the devloss tmo is already set then nodev tmo is set to devloss tmo, 3664 * a kernel error message is printed and zero is returned. 3665 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3666 * Otherwise nodev tmo is set to the default value. 3667 * 3668 * Returns: 3669 * zero if already set or if val is in range 3670 * -EINVAL val out of range 3671 **/ 3672 static int 3673 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) 3674 { 3675 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { 3676 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; 3677 if (val != LPFC_DEF_DEVLOSS_TMO) 3678 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3679 "0407 Ignoring lpfc_nodev_tmo module " 3680 "parameter because lpfc_devloss_tmo " 3681 "is set.\n"); 3682 return 0; 3683 } 3684 3685 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3686 vport->cfg_nodev_tmo = val; 3687 vport->cfg_devloss_tmo = val; 3688 return 0; 3689 } 3690 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3691 "0400 lpfc_nodev_tmo attribute cannot be set to" 3692 " %d, allowed range is [%d, %d]\n", 3693 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3694 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3695 return -EINVAL; 3696 } 3697 3698 /** 3699 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value 3700 * @vport: lpfc vport structure pointer. 3701 * 3702 * Description: 3703 * Update all the ndlp's dev loss tmo with the vport devloss tmo value. 3704 **/ 3705 static void 3706 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) 3707 { 3708 struct Scsi_Host *shost; 3709 struct lpfc_nodelist *ndlp; 3710 #if (IS_ENABLED(CONFIG_NVME_FC)) 3711 struct lpfc_nvme_rport *rport; 3712 struct nvme_fc_remote_port *remoteport = NULL; 3713 #endif 3714 3715 shost = lpfc_shost_from_vport(vport); 3716 spin_lock_irq(shost->host_lock); 3717 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3718 if (ndlp->rport) 3719 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3720 #if (IS_ENABLED(CONFIG_NVME_FC)) 3721 spin_lock(&ndlp->lock); 3722 rport = lpfc_ndlp_get_nrport(ndlp); 3723 if (rport) 3724 remoteport = rport->remoteport; 3725 spin_unlock(&ndlp->lock); 3726 if (rport && remoteport) 3727 nvme_fc_set_remoteport_devloss(remoteport, 3728 vport->cfg_devloss_tmo); 3729 #endif 3730 } 3731 spin_unlock_irq(shost->host_lock); 3732 } 3733 3734 /** 3735 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values 3736 * @vport: lpfc vport structure pointer. 3737 * @val: contains the tmo value. 3738 * 3739 * Description: 3740 * If the devloss tmo is already set or the vport dev loss tmo has changed 3741 * then a kernel error message is printed and zero is returned. 3742 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3743 * Otherwise nodev tmo is set to the default value. 
3744 * 3745 * Returns: 3746 * zero if already set or if val is in range 3747 * -EINVAL val out of range 3748 **/ 3749 static int 3750 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) 3751 { 3752 if (vport->dev_loss_tmo_changed || 3753 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { 3754 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3755 "0401 Ignoring change to lpfc_nodev_tmo " 3756 "because lpfc_devloss_tmo is set.\n"); 3757 return 0; 3758 } 3759 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3760 vport->cfg_nodev_tmo = val; 3761 vport->cfg_devloss_tmo = val; 3762 /* 3763 * For compat: set the fc_host dev loss so new rports 3764 * will get the value. 3765 */ 3766 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; 3767 lpfc_update_rport_devloss_tmo(vport); 3768 return 0; 3769 } 3770 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3771 "0403 lpfc_nodev_tmo attribute cannot be set to " 3772 "%d, allowed range is [%d, %d]\n", 3773 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3774 return -EINVAL; 3775 } 3776 3777 lpfc_vport_param_store(nodev_tmo) 3778 3779 static DEVICE_ATTR_RW(lpfc_nodev_tmo); 3780 3781 /* 3782 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that 3783 # disappear until the timer expires. Value range is [0,255]. Default 3784 # value is 30. 3785 */ 3786 module_param(lpfc_devloss_tmo, int, S_IRUGO); 3787 MODULE_PARM_DESC(lpfc_devloss_tmo, 3788 "Seconds driver will hold I/O waiting " 3789 "for a device to come back"); 3790 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, 3791 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) 3792 lpfc_vport_param_show(devloss_tmo) 3793 3794 /** 3795 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit 3796 * @vport: lpfc vport structure pointer. 3797 * @val: contains the tmo value. 3798 * 3799 * Description: 3800 * If val is in a valid range then set the vport nodev tmo, 3801 * devloss tmo, also set the vport dev loss tmo changed flag. 3802 * Else a kernel error message is printed. 
3803 *
3804 * Returns:
3805 * zero if val is in range
3806 * -EINVAL val out of range
3807 **/
3808 static int
3809 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3810 {
3811 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3812 vport->cfg_nodev_tmo = val;
3813 vport->cfg_devloss_tmo = val;
3814 vport->dev_loss_tmo_changed = 1;
3815 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3816 lpfc_update_rport_devloss_tmo(vport);
3817 return 0;
3818 }
3819
3820 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3821 "0404 lpfc_devloss_tmo attribute cannot be set to "
3822 "%d, allowed range is [%d, %d]\n",
3823 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3824 return -EINVAL;
3825 }
3826
3827 lpfc_vport_param_store(devloss_tmo)
3828 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3829
3830 /*
3831 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3832 * lpfc_suppress_rsp = 0 Disable
3833 * lpfc_suppress_rsp = 1 Enable (default)
3834 *
3835 */
3836 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3837 "Enable suppress rsp feature if firmware supports it");
3838
3839 /*
3840 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3841 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3842 * lpfc_nvmet_mrq = 1 use a single RQ pair
3843 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3844 *
3845 */
3846 LPFC_ATTR_R(nvmet_mrq,
3847 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3848 "Specify number of RQ pairs for processing NVMET cmds");
3849
3850 /*
3851 * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
3852 * to each NVMET RQ. Range 64 to 2048, default is 512.
3853 */
3854 LPFC_ATTR_R(nvmet_mrq_post,
3855 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3856 LPFC_NVMET_RQE_DEF_COUNT,
3857 "Specify number of RQ buffers to initially post");
3858
3859 /*
3860 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3861 * Supported Values: 1 - register just FCP
3862 * 3 - register both FCP and NVME
3863 * Supported values are [1,3]. Default value is 3
3864 */
3865 LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
3866 LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
3867 "Enable FC4 Protocol support - FCP / NVME");
3868
3869 /*
3870 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3871 # deluged with LOTS of information.
3872 # You can set a bit mask to record specific types of verbose messages:
3873 # See lpfc_logmsg.h for definitions.
3874 */
3875 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3876 "Verbose logging bit-mask");
3877
3878 /*
3879 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3880 # objects that have been registered with the nameserver after login.
3881 */
3882 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3883 "Deregister nameserver objects before LOGO");
3884
3885 /*
3886 # lun_queue_depth: This parameter is used to limit the number of outstanding
3887 # commands per FCP LUN.
3888 */
3889 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3890 "Max number of FCP commands we can queue to a specific LUN");
3891
3892 /*
3893 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3894 # commands per target port. Value range is [10,65535]. Default value is 65535.
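#
# Illustrative usage (not part of the original source; host number assumed).
# The value may be set at module load time or changed per host at runtime
# through the sysfs attribute defined below:
#   modprobe lpfc lpfc_tgt_queue_depth=128
#   echo 128 > /sys/class/scsi_host/host0/lpfc_tgt_queue_depth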
3895 */ 3896 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; 3897 module_param(lpfc_tgt_queue_depth, uint, 0444); 3898 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); 3899 lpfc_vport_param_show(tgt_queue_depth); 3900 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, 3901 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH); 3902 3903 /** 3904 * lpfc_tgt_queue_depth_set: Sets an attribute value. 3905 * @vport: lpfc vport structure pointer. 3906 * @val: integer attribute value. 3907 * 3908 * Description: Sets the parameter to the new value. 3909 * 3910 * Returns: 3911 * zero on success 3912 * -EINVAL if val is invalid 3913 */ 3914 static int 3915 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val) 3916 { 3917 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3918 struct lpfc_nodelist *ndlp; 3919 3920 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH)) 3921 return -EINVAL; 3922 3923 if (val == vport->cfg_tgt_queue_depth) 3924 return 0; 3925 3926 spin_lock_irq(shost->host_lock); 3927 vport->cfg_tgt_queue_depth = val; 3928 3929 /* Next loop thru nodelist and change cmd_qdepth */ 3930 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 3931 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 3932 3933 spin_unlock_irq(shost->host_lock); 3934 return 0; 3935 } 3936 3937 lpfc_vport_param_store(tgt_queue_depth); 3938 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth); 3939 3940 /* 3941 # hba_queue_depth: This parameter is used to limit the number of outstanding 3942 # commands per lpfc HBA. Value range is [32,8192]. If this parameter 3943 # value is greater than the maximum number of exchanges supported by the HBA, 3944 # then maximum number of exchanges supported by the HBA is used to determine 3945 # the hba_queue_depth. 3946 */ 3947 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, 3948 "Max number of FCP commands we can queue to a lpfc HBA"); 3949 3950 /* 3951 # peer_port_login: This parameter allows/prevents logins 3952 # between peer ports hosted on the same physical port. 3953 # When this parameter is set 0 peer ports of same physical port 3954 # are not allowed to login to each other. 3955 # When this parameter is set 1 peer ports of same physical port 3956 # are allowed to login to each other. 3957 # Default value of this parameter is 0. 3958 */ 3959 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, 3960 "Allow peer ports on the same physical port to login to each " 3961 "other."); 3962 3963 /* 3964 # restrict_login: This parameter allows/prevents logins 3965 # between Virtual Ports and remote initiators. 3966 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from 3967 # other initiators and will attempt to PLOGI all remote ports. 3968 # When this parameter is set (1) Virtual Ports will reject PLOGIs from 3969 # remote ports and will not attempt to PLOGI to other initiators. 3970 # This parameter does not restrict to the physical port. 3971 # This parameter does not restrict logins to Fabric resident remote ports. 3972 # Default value of this parameter is 1. 3973 */ 3974 static int lpfc_restrict_login = 1; 3975 module_param(lpfc_restrict_login, int, S_IRUGO); 3976 MODULE_PARM_DESC(lpfc_restrict_login, 3977 "Restrict virtual ports login to remote initiators."); 3978 lpfc_vport_param_show(restrict_login); 3979 3980 /** 3981 * lpfc_restrict_login_init - Set the vport restrict login flag 3982 * @vport: lpfc vport structure pointer. 3983 * @val: contains the restrict login value. 
3984 * 3985 * Description: 3986 * If val is not in a valid range then log a kernel error message and set 3987 * the vport restrict login to one. 3988 * If the port type is physical clear the restrict login flag and return. 3989 * Else set the restrict login flag to val. 3990 * 3991 * Returns: 3992 * zero if val is in range 3993 * -EINVAL val out of range 3994 **/ 3995 static int 3996 lpfc_restrict_login_init(struct lpfc_vport *vport, int val) 3997 { 3998 if (val < 0 || val > 1) { 3999 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4000 "0422 lpfc_restrict_login attribute cannot " 4001 "be set to %d, allowed range is [0, 1]\n", 4002 val); 4003 vport->cfg_restrict_login = 1; 4004 return -EINVAL; 4005 } 4006 if (vport->port_type == LPFC_PHYSICAL_PORT) { 4007 vport->cfg_restrict_login = 0; 4008 return 0; 4009 } 4010 vport->cfg_restrict_login = val; 4011 return 0; 4012 } 4013 4014 /** 4015 * lpfc_restrict_login_set - Set the vport restrict login flag 4016 * @vport: lpfc vport structure pointer. 4017 * @val: contains the restrict login value. 4018 * 4019 * Description: 4020 * If val is not in a valid range then log a kernel error message and set 4021 * the vport restrict login to one. 4022 * If the port type is physical and the val is not zero log a kernel 4023 * error message, clear the restrict login flag and return zero. 4024 * Else set the restrict login flag to val. 4025 * 4026 * Returns: 4027 * zero if val is in range 4028 * -EINVAL val out of range 4029 **/ 4030 static int 4031 lpfc_restrict_login_set(struct lpfc_vport *vport, int val) 4032 { 4033 if (val < 0 || val > 1) { 4034 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4035 "0425 lpfc_restrict_login attribute cannot " 4036 "be set to %d, allowed range is [0, 1]\n", 4037 val); 4038 vport->cfg_restrict_login = 1; 4039 return -EINVAL; 4040 } 4041 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { 4042 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4043 "0468 lpfc_restrict_login must be 0 for " 4044 "Physical ports.\n"); 4045 vport->cfg_restrict_login = 0; 4046 return 0; 4047 } 4048 vport->cfg_restrict_login = val; 4049 return 0; 4050 } 4051 lpfc_vport_param_store(restrict_login); 4052 static DEVICE_ATTR_RW(lpfc_restrict_login); 4053 4054 /* 4055 # Some disk devices have a "select ID" or "select Target" capability. 4056 # From a protocol standpoint "select ID" usually means select the 4057 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative 4058 # annex" which contains a table that maps a "select ID" (a number 4059 # between 0 and 7F) to an ALPA. By default, for compatibility with 4060 # older drivers, the lpfc driver scans this table from low ALPA to high 4061 # ALPA. 4062 # 4063 # Turning on the scan-down variable (on = 1, off = 0) will 4064 # cause the lpfc driver to use an inverted table, effectively 4065 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. 4066 # 4067 # (Note: This "select ID" functionality is a LOOP ONLY characteristic 4068 # and will not work across a fabric. Also this parameter will take 4069 # effect only in the case when ALPA map is not available.) 
4070 */ 4071 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, 4072 "Start scanning for devices from highest ALPA to lowest"); 4073 4074 /* 4075 # lpfc_topology: link topology for init link 4076 # 0x0 = attempt loop mode then point-to-point 4077 # 0x01 = internal loopback mode 4078 # 0x02 = attempt point-to-point mode only 4079 # 0x04 = attempt loop mode only 4080 # 0x06 = attempt point-to-point mode then loop 4081 # Set point-to-point mode if you want to run as an N_Port. 4082 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. 4083 # Default value is 0. 4084 */ 4085 LPFC_ATTR(topology, 0, 0, 6, 4086 "Select Fibre Channel topology"); 4087 4088 /** 4089 * lpfc_topology_store - Set the adapters topology field 4090 * @dev: class device that is converted into a scsi_host. 4091 * @attr:device attribute, not used. 4092 * @buf: buffer for passing information. 4093 * @count: size of the data buffer. 4094 * 4095 * Description: 4096 * If val is in a valid range then set the adapter's topology field and 4097 * issue a lip; if the lip fails reset the topology to the old value. 4098 * 4099 * If the value is not in range log a kernel error message and return an error. 4100 * 4101 * Returns: 4102 * zero if val is in range and lip okay 4103 * non-zero return value from lpfc_issue_lip() 4104 * -EINVAL val out of range 4105 **/ 4106 static ssize_t 4107 lpfc_topology_store(struct device *dev, struct device_attribute *attr, 4108 const char *buf, size_t count) 4109 { 4110 struct Scsi_Host *shost = class_to_shost(dev); 4111 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4112 struct lpfc_hba *phba = vport->phba; 4113 int val = 0; 4114 int nolip = 0; 4115 const char *val_buf = buf; 4116 int err; 4117 uint32_t prev_val; 4118 u8 sli_family, if_type; 4119 4120 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4121 nolip = 1; 4122 val_buf = &buf[strlen("nolip ")]; 4123 } 4124 4125 if (!isdigit(val_buf[0])) 4126 return -EINVAL; 4127 if (sscanf(val_buf, "%i", &val) != 1) 4128 return -EINVAL; 4129 4130 if (val >= 0 && val <= 6) { 4131 prev_val = phba->cfg_topology; 4132 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G && 4133 val == 4) { 4134 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4135 "3113 Loop mode not supported at speed %d\n", 4136 val); 4137 return -EINVAL; 4138 } 4139 /* 4140 * The 'topology' is not a configurable parameter if : 4141 * - persistent topology enabled 4142 * - ASIC_GEN_NUM >= 0xC, with no private loop support 4143 */ 4144 sli_family = bf_get(lpfc_sli_intf_sli_family, 4145 &phba->sli4_hba.sli_intf); 4146 if_type = bf_get(lpfc_sli_intf_if_type, 4147 &phba->sli4_hba.sli_intf); 4148 if ((phba->hba_flag & HBA_PERSISTENT_TOPO || 4149 (!phba->sli4_hba.pc_sli4_params.pls && 4150 (sli_family == LPFC_SLI_INTF_FAMILY_G6 || 4151 if_type == LPFC_SLI_INTF_IF_TYPE_6))) && 4152 val == 4) { 4153 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4154 "3114 Loop mode not supported\n"); 4155 return -EINVAL; 4156 } 4157 phba->cfg_topology = val; 4158 if (nolip) 4159 return strlen(buf); 4160 4161 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4162 "3054 lpfc_topology changed from %d to %d\n", 4163 prev_val, val); 4164 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) 4165 phba->fc_topology_changed = 1; 4166 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4167 if (err) { 4168 phba->cfg_topology = prev_val; 4169 return -EINVAL; 4170 } else 4171 return strlen(buf); 4172 } 4173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4174 "%d:0467 lpfc_topology attribute cannot be set to %d, " 
4175 "allowed range is [0, 6]\n", 4176 phba->brd_no, val); 4177 return -EINVAL; 4178 } 4179 4180 lpfc_param_show(topology) 4181 static DEVICE_ATTR_RW(lpfc_topology); 4182 4183 /** 4184 * lpfc_static_vport_show: Read callback function for 4185 * lpfc_static_vport sysfs file. 4186 * @dev: Pointer to class device object. 4187 * @attr: device attribute structure. 4188 * @buf: Data buffer. 4189 * 4190 * This function is the read call back function for 4191 * lpfc_static_vport sysfs file. The lpfc_static_vport 4192 * sysfs file report the mageability of the vport. 4193 **/ 4194 static ssize_t 4195 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, 4196 char *buf) 4197 { 4198 struct Scsi_Host *shost = class_to_shost(dev); 4199 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4200 if (vport->vport_flag & STATIC_VPORT) 4201 sprintf(buf, "1\n"); 4202 else 4203 sprintf(buf, "0\n"); 4204 4205 return strlen(buf); 4206 } 4207 4208 /* 4209 * Sysfs attribute to control the statistical data collection. 4210 */ 4211 static DEVICE_ATTR_RO(lpfc_static_vport); 4212 4213 /* 4214 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel 4215 # connection. 4216 # Value range is [0,16]. Default value is 0. 4217 */ 4218 /** 4219 * lpfc_link_speed_store - Set the adapters link speed 4220 * @dev: Pointer to class device. 4221 * @attr: Unused. 4222 * @buf: Data buffer. 4223 * @count: Size of the data buffer. 4224 * 4225 * Description: 4226 * If val is in a valid range then set the adapter's link speed field and 4227 * issue a lip; if the lip fails reset the link speed to the old value. 4228 * 4229 * Notes: 4230 * If the value is not in range log a kernel error message and return an error. 4231 * 4232 * Returns: 4233 * zero if val is in range and lip okay. 4234 * non-zero return value from lpfc_issue_lip() 4235 * -EINVAL val out of range 4236 **/ 4237 static ssize_t 4238 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, 4239 const char *buf, size_t count) 4240 { 4241 struct Scsi_Host *shost = class_to_shost(dev); 4242 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4243 struct lpfc_hba *phba = vport->phba; 4244 int val = LPFC_USER_LINK_SPEED_AUTO; 4245 int nolip = 0; 4246 const char *val_buf = buf; 4247 int err; 4248 uint32_t prev_val, if_type; 4249 4250 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 4251 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && 4252 phba->hba_flag & HBA_FORCED_LINK_SPEED) 4253 return -EPERM; 4254 4255 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4256 nolip = 1; 4257 val_buf = &buf[strlen("nolip ")]; 4258 } 4259 4260 if (!isdigit(val_buf[0])) 4261 return -EINVAL; 4262 if (sscanf(val_buf, "%i", &val) != 1) 4263 return -EINVAL; 4264 4265 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4266 "3055 lpfc_link_speed changed from %d to %d %s\n", 4267 phba->cfg_link_speed, val, nolip ? 
"(nolip)" : "(lip)"); 4268 4269 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 4270 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 4271 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 4272 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || 4273 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || 4274 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || 4275 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || 4276 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { 4277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4278 "2879 lpfc_link_speed attribute cannot be set " 4279 "to %d. Speed is not supported by this port.\n", 4280 val); 4281 return -EINVAL; 4282 } 4283 if (val >= LPFC_USER_LINK_SPEED_16G && 4284 phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4286 "3112 lpfc_link_speed attribute cannot be set " 4287 "to %d. Speed is not supported in loop mode.\n", 4288 val); 4289 return -EINVAL; 4290 } 4291 4292 switch (val) { 4293 case LPFC_USER_LINK_SPEED_AUTO: 4294 case LPFC_USER_LINK_SPEED_1G: 4295 case LPFC_USER_LINK_SPEED_2G: 4296 case LPFC_USER_LINK_SPEED_4G: 4297 case LPFC_USER_LINK_SPEED_8G: 4298 case LPFC_USER_LINK_SPEED_16G: 4299 case LPFC_USER_LINK_SPEED_32G: 4300 case LPFC_USER_LINK_SPEED_64G: 4301 prev_val = phba->cfg_link_speed; 4302 phba->cfg_link_speed = val; 4303 if (nolip) 4304 return strlen(buf); 4305 4306 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4307 if (err) { 4308 phba->cfg_link_speed = prev_val; 4309 return -EINVAL; 4310 } 4311 return strlen(buf); 4312 default: 4313 break; 4314 } 4315 4316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4317 "0469 lpfc_link_speed attribute cannot be set to %d, " 4318 "allowed values are [%s]\n", 4319 val, LPFC_LINK_SPEED_STRING); 4320 return -EINVAL; 4321 4322 } 4323 4324 static int lpfc_link_speed = 0; 4325 module_param(lpfc_link_speed, int, S_IRUGO); 4326 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); 4327 lpfc_param_show(link_speed) 4328 4329 /** 4330 * lpfc_link_speed_init - Set the adapters link speed 4331 * @phba: lpfc_hba pointer. 4332 * @val: link speed value. 4333 * 4334 * Description: 4335 * If val is in a valid range then set the adapter's link speed field. 4336 * 4337 * Notes: 4338 * If the value is not in range log a kernel error message, clear the link 4339 * speed and return an error. 4340 * 4341 * Returns: 4342 * zero if val saved. 
4343 * -EINVAL val out of range 4344 **/ 4345 static int 4346 lpfc_link_speed_init(struct lpfc_hba *phba, int val) 4347 { 4348 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { 4349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4350 "3111 lpfc_link_speed of %d cannot " 4351 "support loop mode, setting topology to default.\n", 4352 val); 4353 phba->cfg_topology = 0; 4354 } 4355 4356 switch (val) { 4357 case LPFC_USER_LINK_SPEED_AUTO: 4358 case LPFC_USER_LINK_SPEED_1G: 4359 case LPFC_USER_LINK_SPEED_2G: 4360 case LPFC_USER_LINK_SPEED_4G: 4361 case LPFC_USER_LINK_SPEED_8G: 4362 case LPFC_USER_LINK_SPEED_16G: 4363 case LPFC_USER_LINK_SPEED_32G: 4364 case LPFC_USER_LINK_SPEED_64G: 4365 phba->cfg_link_speed = val; 4366 return 0; 4367 default: 4368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4369 "0405 lpfc_link_speed attribute cannot " 4370 "be set to %d, allowed values are " 4371 "["LPFC_LINK_SPEED_STRING"]\n", val); 4372 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 4373 return -EINVAL; 4374 } 4375 } 4376 4377 static DEVICE_ATTR_RW(lpfc_link_speed); 4378 4379 /* 4380 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) 4381 # 0 = aer disabled or not supported 4382 # 1 = aer supported and enabled (default) 4383 # Value range is [0,1]. Default value is 1. 4384 */ 4385 LPFC_ATTR(aer_support, 1, 0, 1, 4386 "Enable PCIe device AER support"); 4387 lpfc_param_show(aer_support) 4388 4389 /** 4390 * lpfc_aer_support_store - Set the adapter for aer support 4391 * 4392 * @dev: class device that is converted into a Scsi_host. 4393 * @attr: device attribute, not used. 4394 * @buf: containing enable or disable aer flag. 4395 * @count: unused variable. 4396 * 4397 * Description: 4398 * If the val is 1 and currently the device's AER capability was not 4399 * enabled, invoke the kernel's enable AER helper routine, trying to 4400 * enable the device's AER capability. If the helper routine enabling 4401 * AER returns success, update the device's cfg_aer_support flag to 4402 * indicate AER is supported by the device; otherwise, if the device 4403 * AER capability is already enabled to support AER, then do nothing. 4404 * 4405 * If the val is 0 and currently the device's AER support was enabled, 4406 * invoke the kernel's disable AER helper routine. After that, update 4407 * the device's cfg_aer_support flag to indicate AER is not supported 4408 * by the device; otherwise, if the device AER capability is already 4409 * disabled from supporting AER, then do nothing. 4410 * 4411 * Returns: 4412 * length of the buf on success if val is in range the intended mode 4413 * is supported. 4414 * -EINVAL if val out of range or intended mode is not supported. 
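 *
 * Illustrative usage (not part of the original source; host number assumed):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_support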
4415 **/ 4416 static ssize_t 4417 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, 4418 const char *buf, size_t count) 4419 { 4420 struct Scsi_Host *shost = class_to_shost(dev); 4421 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4422 struct lpfc_hba *phba = vport->phba; 4423 int val = 0, rc = -EINVAL; 4424 4425 if (!isdigit(buf[0])) 4426 return -EINVAL; 4427 if (sscanf(buf, "%i", &val) != 1) 4428 return -EINVAL; 4429 4430 switch (val) { 4431 case 0: 4432 if (phba->hba_flag & HBA_AER_ENABLED) { 4433 rc = pci_disable_pcie_error_reporting(phba->pcidev); 4434 if (!rc) { 4435 spin_lock_irq(&phba->hbalock); 4436 phba->hba_flag &= ~HBA_AER_ENABLED; 4437 spin_unlock_irq(&phba->hbalock); 4438 phba->cfg_aer_support = 0; 4439 rc = strlen(buf); 4440 } else 4441 rc = -EPERM; 4442 } else { 4443 phba->cfg_aer_support = 0; 4444 rc = strlen(buf); 4445 } 4446 break; 4447 case 1: 4448 if (!(phba->hba_flag & HBA_AER_ENABLED)) { 4449 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4450 if (!rc) { 4451 spin_lock_irq(&phba->hbalock); 4452 phba->hba_flag |= HBA_AER_ENABLED; 4453 spin_unlock_irq(&phba->hbalock); 4454 phba->cfg_aer_support = 1; 4455 rc = strlen(buf); 4456 } else 4457 rc = -EPERM; 4458 } else { 4459 phba->cfg_aer_support = 1; 4460 rc = strlen(buf); 4461 } 4462 break; 4463 default: 4464 rc = -EINVAL; 4465 break; 4466 } 4467 return rc; 4468 } 4469 4470 static DEVICE_ATTR_RW(lpfc_aer_support); 4471 4472 /** 4473 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device 4474 * @dev: class device that is converted into a Scsi_host. 4475 * @attr: device attribute, not used. 4476 * @buf: containing flag 1 for aer cleanup state. 4477 * @count: unused variable. 4478 * 4479 * Description: 4480 * If the @buf contains 1 and the device currently has the AER support 4481 * enabled, then invokes the kernel AER helper routine 4482 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable 4483 * error status register. 4484 * 4485 * Notes: 4486 * 4487 * Returns: 4488 * -EINVAL if the buf does not contain the 1 or the device is not currently 4489 * enabled with the AER support. 4490 **/ 4491 static ssize_t 4492 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, 4493 const char *buf, size_t count) 4494 { 4495 struct Scsi_Host *shost = class_to_shost(dev); 4496 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4497 struct lpfc_hba *phba = vport->phba; 4498 int val, rc = -1; 4499 4500 if (!isdigit(buf[0])) 4501 return -EINVAL; 4502 if (sscanf(buf, "%i", &val) != 1) 4503 return -EINVAL; 4504 if (val != 1) 4505 return -EINVAL; 4506 4507 if (phba->hba_flag & HBA_AER_ENABLED) 4508 rc = pci_aer_clear_nonfatal_status(phba->pcidev); 4509 4510 if (rc == 0) 4511 return strlen(buf); 4512 else 4513 return -EPERM; 4514 } 4515 4516 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, 4517 lpfc_aer_cleanup_state); 4518 4519 /** 4520 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions 4521 * 4522 * @dev: class device that is converted into a Scsi_host. 4523 * @attr: device attribute, not used. 4524 * @buf: containing the string the number of vfs to be enabled. 4525 * @count: unused variable. 
4526 *
4527 * Description:
4528 * When this api is called through user sysfs, the driver shall
4529 * try to enable or disable SR-IOV virtual functions according to the
4530 * following:
4531 *
4532 * If no virtual functions have been enabled on the physical function,
4533 * the driver shall invoke the pci enable virtual function api trying
4534 * to enable the virtual functions. If the nr_vfn provided is greater
4535 * than the maximum supported, the maximum virtual function number will
4536 * be used for invoking the api; otherwise, the nr_vfn provided shall
4537 * be used for invoking the api. If the api call returned success, the
4538 * actual number of virtual functions enabled will be set to the driver
4539 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4540 * cfg_sriov_nr_virtfn remains zero.
4541 *
4542 * If non-zero virtual functions have already been enabled on the
4543 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4544 * -EINVAL will be returned and the driver does nothing;
4545 *
4546 * If the nr_vfn provided is zero and non-zero virtual functions have
4547 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4548 * disabling virtual function api shall be invoked to disable all the
4549 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4550 * zero. Otherwise, if no virtual functions are enabled, do
4551 * nothing.
4552 *
4553 * Returns:
4554 * length of the buf on success if val is in range and the intended mode
4555 * is supported.
4556 * -EINVAL if val out of range or intended mode is not supported.
4557 **/
4558 static ssize_t
4559 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4560 const char *buf, size_t count)
4561 {
4562 struct Scsi_Host *shost = class_to_shost(dev);
4563 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4564 struct lpfc_hba *phba = vport->phba;
4565 struct pci_dev *pdev = phba->pcidev;
4566 int val = 0, rc = -EINVAL;
4567
4568 /* Sanity check on user data */
4569 if (!isdigit(buf[0]))
4570 return -EINVAL;
4571 if (sscanf(buf, "%i", &val) != 1)
4572 return -EINVAL;
4573 if (val < 0)
4574 return -EINVAL;
4575
4576 /* Request disabling virtual functions */
4577 if (val == 0) {
4578 if (phba->cfg_sriov_nr_virtfn > 0) {
4579 pci_disable_sriov(pdev);
4580 phba->cfg_sriov_nr_virtfn = 0;
4581 }
4582 return strlen(buf);
4583 }
4584
4585 /* Request enabling virtual functions */
4586 if (phba->cfg_sriov_nr_virtfn > 0) {
4587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4588 "3018 There are %d virtual functions "
4589 "enabled on physical function.\n",
4590 phba->cfg_sriov_nr_virtfn);
4591 return -EEXIST;
4592 }
4593
4594 if (val <= LPFC_MAX_VFN_PER_PFN)
4595 phba->cfg_sriov_nr_virtfn = val;
4596 else {
4597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4598 "3019 Enabling %d virtual functions is not "
4599 "allowed.\n", val);
4600 return -EINVAL;
4601 }
4602
4603 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4604 if (rc) {
4605 phba->cfg_sriov_nr_virtfn = 0;
4606 rc = -EPERM;
4607 } else
4608 rc = strlen(buf);
4609
4610 return rc;
4611 }
4612
4613 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4614 "Enable PCIe device SR-IOV virtual fn");
4615
4616 lpfc_param_show(sriov_nr_virtfn)
4617 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
4618
4619 /**
4620 * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
4621 *
4622 * @dev: class device that is converted
into a Scsi_host. 4623 * @attr: device attribute, not used. 4624 * @buf: containing the string the number of vfs to be enabled. 4625 * @count: unused variable. 4626 * 4627 * Description: 4628 * 4629 * Returns: 4630 * length of the buf on success if val is in range the intended mode 4631 * is supported. 4632 * -EINVAL if val out of range or intended mode is not supported. 4633 **/ 4634 static ssize_t 4635 lpfc_request_firmware_upgrade_store(struct device *dev, 4636 struct device_attribute *attr, 4637 const char *buf, size_t count) 4638 { 4639 struct Scsi_Host *shost = class_to_shost(dev); 4640 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4641 struct lpfc_hba *phba = vport->phba; 4642 int val = 0, rc; 4643 4644 /* Sanity check on user data */ 4645 if (!isdigit(buf[0])) 4646 return -EINVAL; 4647 if (sscanf(buf, "%i", &val) != 1) 4648 return -EINVAL; 4649 if (val != 1) 4650 return -EINVAL; 4651 4652 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE); 4653 if (rc) 4654 rc = -EPERM; 4655 else 4656 rc = strlen(buf); 4657 return rc; 4658 } 4659 4660 static int lpfc_req_fw_upgrade; 4661 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR); 4662 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade"); 4663 lpfc_param_show(request_firmware_upgrade) 4664 4665 /** 4666 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade 4667 * @phba: lpfc_hba pointer. 4668 * @val: 0 or 1. 4669 * 4670 * Description: 4671 * Set the initial Linux generic firmware upgrade enable or disable flag. 4672 * 4673 * Returns: 4674 * zero if val saved. 4675 * -EINVAL val out of range 4676 **/ 4677 static int 4678 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val) 4679 { 4680 if (val >= 0 && val <= 1) { 4681 phba->cfg_request_firmware_upgrade = val; 4682 return 0; 4683 } 4684 return -EINVAL; 4685 } 4686 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR, 4687 lpfc_request_firmware_upgrade_show, 4688 lpfc_request_firmware_upgrade_store); 4689 4690 /** 4691 * lpfc_force_rscn_store 4692 * 4693 * @dev: class device that is converted into a Scsi_host. 4694 * @attr: device attribute, not used. 4695 * @buf: unused string 4696 * @count: unused variable. 4697 * 4698 * Description: 4699 * Force the switch to send a RSCN to all other NPorts in our zone 4700 * If we are direct connect pt2pt, build the RSCN command ourself 4701 * and send to the other NPort. Not supported for private loop. 4702 * 4703 * Returns: 4704 * 0 - on success 4705 * -EIO - if command is not sent 4706 **/ 4707 static ssize_t 4708 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr, 4709 const char *buf, size_t count) 4710 { 4711 struct Scsi_Host *shost = class_to_shost(dev); 4712 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4713 int i; 4714 4715 i = lpfc_issue_els_rscn(vport, 0); 4716 if (i) 4717 return -EIO; 4718 return strlen(buf); 4719 } 4720 4721 /* 4722 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts 4723 * connected to the HBA. 4724 * 4725 * Value range is any ascii value 4726 */ 4727 static int lpfc_force_rscn; 4728 module_param(lpfc_force_rscn, int, 0644); 4729 MODULE_PARM_DESC(lpfc_force_rscn, 4730 "Force an RSCN to be sent to all remote NPorts"); 4731 lpfc_param_show(force_rscn) 4732 4733 /** 4734 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts 4735 * @phba: lpfc_hba pointer. 4736 * @val: unused value. 4737 * 4738 * Returns: 4739 * zero if val saved. 
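 *
 * Illustrative trigger of the companion store routine above (not part of the
 * original source; host number assumed); an accepted write issues an RSCN
 * via lpfc_issue_els_rscn():
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_force_rscn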
4740 **/
4741 static int
4742 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4743 {
4744 return 0;
4745 }
4746 static DEVICE_ATTR_RW(lpfc_force_rscn);
4747
4748 /**
4749 * lpfc_fcp_imax_store
4750 *
4751 * @dev: class device that is converted into a Scsi_host.
4752 * @attr: device attribute, not used.
4753 * @buf: string with the number of fast-path FCP interrupts per second.
4754 * @count: unused variable.
4755 *
4756 * Description:
4757 * If val is in a valid range [5000,5000000], or is 0, then set the adapter's
4758 * maximum number of fast-path FCP interrupts per second.
4759 *
4760 * Returns:
4761 * length of the buf on success if val is in range and the intended mode
4762 * is supported.
4763 * -EINVAL if val out of range or intended mode is not supported.
4764 **/
4765 static ssize_t
4766 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
4767 const char *buf, size_t count)
4768 {
4769 struct Scsi_Host *shost = class_to_shost(dev);
4770 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4771 struct lpfc_hba *phba = vport->phba;
4772 struct lpfc_eq_intr_info *eqi;
4773 uint32_t usdelay;
4774 int val = 0, i;
4775
4776 /* fcp_imax is only valid for SLI4 */
4777 if (phba->sli_rev != LPFC_SLI_REV4)
4778 return -EINVAL;
4779
4780 /* Sanity check on user data */
4781 if (!isdigit(buf[0]))
4782 return -EINVAL;
4783 if (sscanf(buf, "%i", &val) != 1)
4784 return -EINVAL;
4785
4786 /*
4787 * Value range for the HBA is [5000,5000000]
4788 * The value for each EQ depends on how many EQs are configured.
4789 * Allow value == 0
4790 */
4791 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
4792 return -EINVAL;
4793
4794 phba->cfg_auto_imax = (val) ? 0 : 1;
4795 if (phba->cfg_fcp_imax && !val) {
4796 queue_delayed_work(phba->wq, &phba->eq_delay_work,
4797 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
4798
4799 for_each_present_cpu(i) {
4800 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
4801 eqi->icnt = 0;
4802 }
4803 }
4804
4805 phba->cfg_fcp_imax = (uint32_t)val;
4806
4807 if (phba->cfg_fcp_imax)
4808 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
4809 else
4810 usdelay = 0;
4811
4812 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
4813 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
4814 usdelay);
4815
4816 return strlen(buf);
4817 }
4818
4819 /*
4820 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
4821 # for the HBA.
4822 #
4823 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
4824 */
4825 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
4826 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
4827 MODULE_PARM_DESC(lpfc_fcp_imax,
4828 "Set the maximum number of FCP interrupts per second per HBA");
4829 lpfc_param_show(fcp_imax)
4830
4831 /**
4832 * lpfc_fcp_imax_init - Set the initial fast-path FCP interrupt rate
4833 * @phba: lpfc_hba pointer.
4834 * @val: fast-path FCP interrupts per second.
4835 *
4836 * Description:
4837 * If val is in a valid range [5000,5000000], or is 0, then initialize the
4838 * adapter's maximum number of fast-path FCP interrupts per second.
4839 *
4840 * Returns:
4841 * zero if val saved.
4842 * -EINVAL val out of range 4843 **/ 4844 static int 4845 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) 4846 { 4847 if (phba->sli_rev != LPFC_SLI_REV4) { 4848 phba->cfg_fcp_imax = 0; 4849 return 0; 4850 } 4851 4852 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || 4853 (val == 0)) { 4854 phba->cfg_fcp_imax = val; 4855 return 0; 4856 } 4857 4858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4859 "3016 lpfc_fcp_imax: %d out of range, using default\n", 4860 val); 4861 phba->cfg_fcp_imax = LPFC_DEF_IMAX; 4862 4863 return 0; 4864 } 4865 4866 static DEVICE_ATTR_RW(lpfc_fcp_imax); 4867 4868 /** 4869 * lpfc_cq_max_proc_limit_store 4870 * 4871 * @dev: class device that is converted into a Scsi_host. 4872 * @attr: device attribute, not used. 4873 * @buf: string with the cq max processing limit of cqes 4874 * @count: unused variable. 4875 * 4876 * Description: 4877 * If val is in a valid range, then set value on each cq 4878 * 4879 * Returns: 4880 * The length of the buf: if successful 4881 * -ERANGE: if val is not in the valid range 4882 * -EINVAL: if bad value format or intended mode is not supported. 4883 **/ 4884 static ssize_t 4885 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr, 4886 const char *buf, size_t count) 4887 { 4888 struct Scsi_Host *shost = class_to_shost(dev); 4889 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4890 struct lpfc_hba *phba = vport->phba; 4891 struct lpfc_queue *eq, *cq; 4892 unsigned long val; 4893 int i; 4894 4895 /* cq_max_proc_limit is only valid for SLI4 */ 4896 if (phba->sli_rev != LPFC_SLI_REV4) 4897 return -EINVAL; 4898 4899 /* Sanity check on user data */ 4900 if (!isdigit(buf[0])) 4901 return -EINVAL; 4902 if (kstrtoul(buf, 0, &val)) 4903 return -EINVAL; 4904 4905 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT) 4906 return -ERANGE; 4907 4908 phba->cfg_cq_max_proc_limit = (uint32_t)val; 4909 4910 /* set the values on the cq's */ 4911 for (i = 0; i < phba->cfg_irq_chann; i++) { 4912 /* Get the EQ corresponding to the IRQ vector */ 4913 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 4914 if (!eq) 4915 continue; 4916 4917 list_for_each_entry(cq, &eq->child_list, list) 4918 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 4919 cq->entry_count); 4920 } 4921 4922 return strlen(buf); 4923 } 4924 4925 /* 4926 * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an 4927 * itteration of CQ processing. 4928 */ 4929 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 4930 module_param(lpfc_cq_max_proc_limit, int, 0644); 4931 MODULE_PARM_DESC(lpfc_cq_max_proc_limit, 4932 "Set the maximum number CQEs processed in an iteration of " 4933 "CQ processing"); 4934 lpfc_param_show(cq_max_proc_limit) 4935 4936 /* 4937 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a 4938 * single handler call which should request a polled completion rather 4939 * than re-enabling interrupts. 4940 */ 4941 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, 4942 LPFC_CQ_MIN_THRESHOLD_TO_POLL, 4943 LPFC_CQ_MAX_THRESHOLD_TO_POLL, 4944 "CQE Processing Threshold to enable Polling"); 4945 4946 /** 4947 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit 4948 * @phba: lpfc_hba pointer. 4949 * @val: entry limit 4950 * 4951 * Description: 4952 * If val is in a valid range, then initialize the adapter's maximum 4953 * value. 4954 * 4955 * Returns: 4956 * Always returns 0 for success, even if value not always set to 4957 * requested value. 
If value out of range or not supported, will fall 4958 * back to default. 4959 **/ 4960 static int 4961 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) 4962 { 4963 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 4964 4965 if (phba->sli_rev != LPFC_SLI_REV4) 4966 return 0; 4967 4968 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { 4969 phba->cfg_cq_max_proc_limit = val; 4970 return 0; 4971 } 4972 4973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4974 "0371 lpfc_cq_max_proc_limit: %d out of range, using " 4975 "default\n", 4976 phba->cfg_cq_max_proc_limit); 4977 4978 return 0; 4979 } 4980 4981 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); 4982 4983 /** 4984 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity 4985 * @dev: class converted to a Scsi_host structure. 4986 * @attr: device attribute, not used. 4987 * @buf: on return contains text describing the state of the link. 4988 * 4989 * Returns: size of formatted string. 4990 **/ 4991 static ssize_t 4992 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, 4993 char *buf) 4994 { 4995 struct Scsi_Host *shost = class_to_shost(dev); 4996 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4997 struct lpfc_hba *phba = vport->phba; 4998 struct lpfc_vector_map_info *cpup; 4999 int len = 0; 5000 5001 if ((phba->sli_rev != LPFC_SLI_REV4) || 5002 (phba->intr_type != MSIX)) 5003 return len; 5004 5005 switch (phba->cfg_fcp_cpu_map) { 5006 case 0: 5007 len += scnprintf(buf + len, PAGE_SIZE-len, 5008 "fcp_cpu_map: No mapping (%d)\n", 5009 phba->cfg_fcp_cpu_map); 5010 return len; 5011 case 1: 5012 len += scnprintf(buf + len, PAGE_SIZE-len, 5013 "fcp_cpu_map: HBA centric mapping (%d): " 5014 "%d of %d CPUs online from %d possible CPUs\n", 5015 phba->cfg_fcp_cpu_map, num_online_cpus(), 5016 num_present_cpus(), 5017 phba->sli4_hba.num_possible_cpu); 5018 break; 5019 } 5020 5021 while (phba->sli4_hba.curr_disp_cpu < 5022 phba->sli4_hba.num_possible_cpu) { 5023 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; 5024 5025 if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) 5026 len += scnprintf(buf + len, PAGE_SIZE - len, 5027 "CPU %02d not present\n", 5028 phba->sli4_hba.curr_disp_cpu); 5029 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 5030 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5031 len += scnprintf( 5032 buf + len, PAGE_SIZE - len, 5033 "CPU %02d hdwq None " 5034 "physid %d coreid %d ht %d ua %d\n", 5035 phba->sli4_hba.curr_disp_cpu, 5036 cpup->phys_id, cpup->core_id, 5037 (cpup->flag & LPFC_CPU_MAP_HYPER), 5038 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5039 else 5040 len += scnprintf( 5041 buf + len, PAGE_SIZE - len, 5042 "CPU %02d EQ None hdwq %04d " 5043 "physid %d coreid %d ht %d ua %d\n", 5044 phba->sli4_hba.curr_disp_cpu, 5045 cpup->hdwq, cpup->phys_id, 5046 cpup->core_id, 5047 (cpup->flag & LPFC_CPU_MAP_HYPER), 5048 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5049 } else { 5050 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5051 len += scnprintf( 5052 buf + len, PAGE_SIZE - len, 5053 "CPU %02d hdwq None " 5054 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5055 phba->sli4_hba.curr_disp_cpu, 5056 cpup->phys_id, 5057 cpup->core_id, 5058 (cpup->flag & LPFC_CPU_MAP_HYPER), 5059 (cpup->flag & LPFC_CPU_MAP_UNASSIGN), 5060 lpfc_get_irq(cpup->eq)); 5061 else 5062 len += scnprintf( 5063 buf + len, PAGE_SIZE - len, 5064 "CPU %02d EQ %04d hdwq %04d " 5065 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5066 phba->sli4_hba.curr_disp_cpu, 5067 cpup->eq, cpup->hdwq, cpup->phys_id, 5068 
cpup->core_id,
5069 (cpup->flag & LPFC_CPU_MAP_HYPER),
5070 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5071 lpfc_get_irq(cpup->eq));
5072 }
5073
5074 phba->sli4_hba.curr_disp_cpu++;
5075
5076 /* display max number of CPUs keeping some margin */
5077 if (phba->sli4_hba.curr_disp_cpu <
5078 phba->sli4_hba.num_possible_cpu &&
5079 (len >= (PAGE_SIZE - 64))) {
5080 len += scnprintf(buf + len,
5081 PAGE_SIZE - len, "more...\n");
5082 break;
5083 }
5084 }
5085
5086 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5087 phba->sli4_hba.curr_disp_cpu = 0;
5088
5089 return len;
5090 }
5091
5092 /**
5093 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5094 * @dev: class device that is converted into a Scsi_host.
5095 * @attr: device attribute, not used.
5096 * @buf: one or more lpfc_polling_flags values.
5097 * @count: not used.
5098 *
5099 * Returns:
5100 * -EINVAL - Not implemented yet.
5101 **/
5102 static ssize_t
5103 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5104 const char *buf, size_t count)
5105 {
5106 return -EINVAL;
5107 }
5108
5109 /*
5110 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5111 # for the HBA.
5112 #
5113 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5114 # 0 - Do not affinitize IRQ vectors
5115 # 1 - Affinitize HBA vectors with respect to each HBA
5116 # (start with CPU0 for each HBA)
5117 # This also defines how Hardware Queues are mapped to specific CPUs.
5118 */
5119 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5120 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5121 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5122 "Defines how to map CPUs to IRQ vectors per HBA");
5123
5124 /**
5125 * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
5126 * @phba: lpfc_hba pointer.
5127 * @val: CPU mapping mode value.
5128 *
5129 * Description:
5130 * If val is in a valid range [0,1], then affinitize the adapter's
5131 * MSIX vectors.
5132 *
5133 * Returns:
5134 * zero if val saved.
5135 * -EINVAL val out of range
5136 **/
5137 static int
5138 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5139 {
5140 if (phba->sli_rev != LPFC_SLI_REV4) {
5141 phba->cfg_fcp_cpu_map = 0;
5142 return 0;
5143 }
5144
5145 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5146 phba->cfg_fcp_cpu_map = val;
5147 return 0;
5148 }
5149
5150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5151 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5152 "default\n", val);
5153 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5154
5155 return 0;
5156 }
5157
5158 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
5159
5160 /*
5161 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5162 # Value range is [2,3]. Default value is 3.
5163 */
5164 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5165 "Select Fibre Channel class of service for FCP sequences");
5166
5167 /*
5168 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5169 # is [0,1]. Default value is 1.
5170 */
5171 LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
5172 "Use ADISC on rediscovery to authenticate FCP devices");
5173
5174 /*
5175 # lpfc_first_burst_size: First burst size to use on the NPorts
5176 # that support first burst.
5177 # Value range is [0,65536]. Default value is 0.
5178 */
5179 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5180 "First burst size for Targets that support first burst");
5181
5182 /*
5183 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5184 * When the driver is configured as an NVME target, this value is 5185 * communicated to the NVME initiator in the PRLI response. It is 5186 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support 5187 * parameters are set and the target is sending the PRLI RSP. 5188 * Parameter supported on physical port only - no NPIV support. 5189 * Value range is [0,65536]. Default value is 0. 5190 */ 5191 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, 5192 "NVME Target mode first burst size in 512B increments."); 5193 5194 /* 5195 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. 5196 * For the Initiator (I), enabling this parameter means that an NVMET 5197 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be 5198 * processed by the initiator for subsequent NVME FCP IO. 5199 * Currently, this feature is not supported on the NVME target 5200 * Value range is [0,1]. Default value is 0 (disabled). 5201 */ 5202 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, 5203 "Enable First Burst feature for NVME Initiator."); 5204 5205 /* 5206 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue 5207 # depth. Default value is 0. When the value of this parameter is zero the 5208 # SCSI command completion time is not used for controlling I/O queue depth. When 5209 # the parameter is set to a non-zero value, the I/O queue depth is controlled 5210 # to limit the I/O completion time to the parameter value. 5211 # The value is set in milliseconds. 5212 */ 5213 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000, 5214 "Use command completion time to control queue depth"); 5215 5216 lpfc_vport_param_show(max_scsicmpl_time); 5217 static int 5218 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) 5219 { 5220 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5221 struct lpfc_nodelist *ndlp, *next_ndlp; 5222 5223 if (val == vport->cfg_max_scsicmpl_time) 5224 return 0; 5225 if ((val < 0) || (val > 60000)) 5226 return -EINVAL; 5227 vport->cfg_max_scsicmpl_time = val; 5228 5229 spin_lock_irq(shost->host_lock); 5230 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5231 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 5232 continue; 5233 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 5234 } 5235 spin_unlock_irq(shost->host_lock); 5236 return 0; 5237 } 5238 lpfc_vport_param_store(max_scsicmpl_time); 5239 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); 5240 5241 /* 5242 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value 5243 # range is [0,1]. Default value is 0. 5244 */ 5245 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); 5246 5247 /* 5248 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature 5249 # range is [0,1]. Default value is 1. 5250 */ 5251 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing"); 5252 5253 /* 5254 * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds 5255 * range is [0,1]. Default value is 0. 5256 * For [0], FCP commands are issued to Work Queues based on upper layer 5257 * hardware queue index. 5258 * For [1], FCP commands are issued to a Work Queue associated with the 5259 * current CPU. 5260 * 5261 * LPFC_FCP_SCHED_BY_HDWQ == 0 5262 * LPFC_FCP_SCHED_BY_CPU == 1 5263 * 5264 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu 5265 * affinity for FCP/NVME I/Os through Work Queues associated with the current 5266 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os 5267 * through WQs will be used. 
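 *
 * Illustrative runtime change (not part of the original source; host number
 * assumed, and the attribute name assumes the driver's usual lpfc_ prefix
 * convention for parameters defined via LPFC_ATTR_RW):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_fcp_io_sched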
 */
LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
	     LPFC_FCP_SCHED_BY_HDWQ,
	     LPFC_FCP_SCHED_BY_CPU,
	     "Determine scheduling algorithm for "
	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");

/*
 * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
 * range is [0,1]. Default value is 0.
 * For [0], GID_FT is used for NameServer queries after RSCN (default)
 * For [1], GID_PT is used for NameServer queries after RSCN
 *
 */
LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
	     "Determine algorithm NameServer queries after RSCN "
	     "[0] - GID_FT, [1] - GID_PT");

/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
# range is [0,1]. Default value is 0.
# For [0], bus reset issues target reset to ALL devices
# For [1], bus reset issues target reset to non-FCP2 devices
*/
LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");


/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
	     "interrupt response is generated");

LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
	     "interrupt response is generated");

/*
# lpfc_multi_ring_support: Determines how many rings to spread available
# cmd/rsp IOCB entries across.
# Value range is [1,2]. Default value is 1.
*/
LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
	    "SLI rings to spread IOCB entries across");

/*
# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
# identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
*/
LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
	    255, "Identifies RCTL for additional ring configuration");

/*
# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
# identifies what type value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
*/
LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
	    255, "Identifies TYPE for additional ring configuration");

/*
# lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
#       0 = SmartSAN functionality disabled (default)
#       1 = SmartSAN functionality enabled
# This parameter will override the value of lpfc_fdmi_on module parameter.
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");

/*
# lpfc_fdmi_on: Controls FDMI support.
#       0       No FDMI support
#       1       Traditional FDMI support (default)
# Traditional FDMI support means the driver will assume FDMI-2 support;
# however, if that fails, it will fall back to FDMI-1.
# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
# lpfc_fdmi_on.
# Value range [0,1]. Default value is 1.
*/
LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");

/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
# discovery). Value range is [1,64]. Default value = 32.
*/
LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
		"during discovery");

/*
# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
# will be scanned by the SCSI midlayer when sequential scanning is
# used; and is also the highest LUN ID allowed when the SCSI midlayer
# parses REPORT_LUN responses. The lpfc driver has no LUN count or
# LUN ID limit, but the SCSI midlayer requires this field for the uses
# above. The lpfc driver limits the default value to 255 for two reasons.
# As it bounds the sequential scan loop, scanning for thousands of luns
# on a target can take minutes of wall clock time. Additionally,
# there are FC targets, such as JBODs, that only recognize 8-bits of
# LUN ID. When they receive a value greater than 8 bits, they chop off
# the high order bits. In other words, they see LUN IDs 0, 256, 512,
# and so on all as LUN ID 0. This causes the linux kernel, which sees
# valid responses at each of the LUN IDs, to believe there are multiple
# devices present, when in fact, there is only 1.
# A customer that is aware of their target behaviors, and the results as
# indicated above, is welcome to increase the lpfc_max_luns value.
# As mentioned, this value is not used by the lpfc driver, only the
# SCSI midlayer.
# Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
*/
LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");

/*
# lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
# Value range is [1,255], default value is 10.
*/
LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
	     "Milliseconds driver will wait between polling FCP ring");

/*
# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
# to complete in seconds. Value range is [5,180], default value is 60.
*/
LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
	     "Maximum time to wait for task management commands to complete");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
#		support this feature
#       0 = MSI disabled
#       1 = MSI enabled
#       2 = MSI-X enabled (default)
# Value range is [0,2]. Default value is 2.
*/
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
	    "MSI-X (2), if possible");

/*
 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
 *
 *      0 = NVME OAS disabled
 *      1 = NVME OAS enabled
 *
 * Value range is [0,1]. Default value is 0.
 */
LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
	     "Use OAS bit on NVME IOs");

/*
 * lpfc_nvme_embed_cmd: Determine whether the NVME Command is embedded in the
 * WQE or carried in an SGL when sending NVME/NVMET IOs
 *
 *      0 = Put NVME Command in SGL
 *      1 = Embed NVME Command in WQE (unless G7)
 *      2 = Embed NVME Command in WQE (force)
 *
 * Value range is [0,2]. Default value is 1.
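 *
 * Example (load-time setting; the value shown is illustrative only):
 *
 *   modprobe lpfc lpfc_nvme_embed_cmd=2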
5430 */ 5431 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, 5432 "Embed NVME Command in WQE"); 5433 5434 /* 5435 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues 5436 * the driver will advertise it supports to the SCSI layer. 5437 * 5438 * 0 = Set nr_hw_queues by the number of CPUs or HW queues. 5439 * 1,256 = Manually specify nr_hw_queue value to be advertised, 5440 * 5441 * Value range is [0,256]. Default value is 8. 5442 */ 5443 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, 5444 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, 5445 "Set the number of SCSI Queues advertised"); 5446 5447 /* 5448 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5449 * will advertise it supports to the NVME and SCSI layers. This also 5450 * will map to the number of CQ/WQ pairs the driver will create. 5451 * 5452 * The NVME Layer will try to create this many, plus 1 administrative 5453 * hardware queue. The administrative queue will always map to WQ 0 5454 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. 5455 * 5456 * 0 = Configure the number of hdw queues to the number of active CPUs. 5457 * 1,256 = Manually specify how many hdw queues to use. 5458 * 5459 * Value range is [0,256]. Default value is 0. 5460 */ 5461 LPFC_ATTR_R(hdw_queue, 5462 LPFC_HBA_HDWQ_DEF, 5463 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, 5464 "Set the number of I/O Hardware Queues"); 5465 5466 #if IS_ENABLED(CONFIG_X86) 5467 /** 5468 * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on 5469 * irq_chann_mode 5470 * @phba: Pointer to HBA context object. 5471 **/ 5472 static void 5473 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) 5474 { 5475 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; 5476 const struct cpumask *sibling_mask; 5477 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; 5478 5479 cpumask_clear(aff_mask); 5480 5481 if (phba->irq_chann_mode == NUMA_MODE) { 5482 /* Check if we're a NUMA architecture */ 5483 numa_node = dev_to_node(&phba->pcidev->dev); 5484 if (numa_node == NUMA_NO_NODE) { 5485 phba->irq_chann_mode = NORMAL_MODE; 5486 return; 5487 } 5488 } 5489 5490 for_each_possible_cpu(cpu) { 5491 switch (phba->irq_chann_mode) { 5492 case NUMA_MODE: 5493 if (cpu_to_node(cpu) == numa_node) 5494 cpumask_set_cpu(cpu, aff_mask); 5495 break; 5496 case NHT_MODE: 5497 sibling_mask = topology_sibling_cpumask(cpu); 5498 first_cpu = cpumask_first(sibling_mask); 5499 if (first_cpu < nr_cpu_ids) 5500 cpumask_set_cpu(first_cpu, aff_mask); 5501 break; 5502 default: 5503 break; 5504 } 5505 } 5506 } 5507 #endif 5508 5509 static void 5510 lpfc_assign_default_irq_chann(struct lpfc_hba *phba) 5511 { 5512 #if IS_ENABLED(CONFIG_X86) 5513 switch (boot_cpu_data.x86_vendor) { 5514 case X86_VENDOR_AMD: 5515 /* If AMD architecture, then default is NUMA_MODE */ 5516 phba->irq_chann_mode = NUMA_MODE; 5517 break; 5518 case X86_VENDOR_INTEL: 5519 /* If Intel architecture, then default is no hyperthread mode */ 5520 phba->irq_chann_mode = NHT_MODE; 5521 break; 5522 default: 5523 phba->irq_chann_mode = NORMAL_MODE; 5524 break; 5525 } 5526 lpfc_cpumask_irq_mode_init(phba); 5527 #else 5528 phba->irq_chann_mode = NORMAL_MODE; 5529 #endif 5530 } 5531 5532 /* 5533 * lpfc_irq_chann: Set the number of IRQ vectors that are available 5534 * for Hardware Queues to utilize. This also will map to the number 5535 * of EQ / MSI-X vectors the driver will create. 
This should never be 5536 * more than the number of Hardware Queues 5537 * 5538 * 0 = Configure number of IRQ Channels to: 5539 * if AMD architecture, number of CPUs on HBA's NUMA node 5540 * if Intel architecture, number of physical CPUs. 5541 * otherwise, number of active CPUs. 5542 * [1,256] = Manually specify how many IRQ Channels to use. 5543 * 5544 * Value range is [0,256]. Default value is [0]. 5545 */ 5546 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; 5547 module_param(lpfc_irq_chann, uint, 0444); 5548 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); 5549 5550 /* lpfc_irq_chann_init - Set the hba irq_chann initial value 5551 * @phba: lpfc_hba pointer. 5552 * @val: contains the initial value 5553 * 5554 * Description: 5555 * Validates the initial value is within range and assigns it to the 5556 * adapter. If not in range, an error message is posted and the 5557 * default value is assigned. 5558 * 5559 * Returns: 5560 * zero if value is in range and is set 5561 * -EINVAL if value was out of range 5562 **/ 5563 static int 5564 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) 5565 { 5566 const struct cpumask *aff_mask; 5567 5568 if (phba->cfg_use_msi != 2) { 5569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5570 "8532 use_msi = %u ignoring cfg_irq_numa\n", 5571 phba->cfg_use_msi); 5572 phba->irq_chann_mode = NORMAL_MODE; 5573 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5574 return 0; 5575 } 5576 5577 /* Check if default setting was passed */ 5578 if (val == LPFC_IRQ_CHANN_DEF && 5579 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF && 5580 phba->sli_rev == LPFC_SLI_REV4) 5581 lpfc_assign_default_irq_chann(phba); 5582 5583 if (phba->irq_chann_mode != NORMAL_MODE) { 5584 aff_mask = &phba->sli4_hba.irq_aff_mask; 5585 5586 if (cpumask_empty(aff_mask)) { 5587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5588 "8533 Could not identify CPUS for " 5589 "mode %d, ignoring\n", 5590 phba->irq_chann_mode); 5591 phba->irq_chann_mode = NORMAL_MODE; 5592 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5593 } else { 5594 phba->cfg_irq_chann = cpumask_weight(aff_mask); 5595 5596 /* If no hyperthread mode, then set hdwq count to 5597 * aff_mask weight as well 5598 */ 5599 if (phba->irq_chann_mode == NHT_MODE) 5600 phba->cfg_hdw_queue = phba->cfg_irq_chann; 5601 5602 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5603 "8543 lpfc_irq_chann set to %u " 5604 "(mode: %d)\n", phba->cfg_irq_chann, 5605 phba->irq_chann_mode); 5606 } 5607 } else { 5608 if (val > LPFC_IRQ_CHANN_MAX) { 5609 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5610 "8545 lpfc_irq_chann attribute cannot " 5611 "be set to %u, allowed range is " 5612 "[%u,%u]\n", 5613 val, 5614 LPFC_IRQ_CHANN_MIN, 5615 LPFC_IRQ_CHANN_MAX); 5616 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5617 return -EINVAL; 5618 } 5619 if (phba->sli_rev == LPFC_SLI_REV4) { 5620 phba->cfg_irq_chann = val; 5621 } else { 5622 phba->cfg_irq_chann = 2; 5623 phba->cfg_hdw_queue = 1; 5624 } 5625 } 5626 5627 return 0; 5628 } 5629 5630 /** 5631 * lpfc_irq_chann_show - Display value of irq_chann 5632 * @dev: class converted to a Scsi_host structure. 5633 * @attr: device attribute, not used. 5634 * @buf: on return contains a string with the list sizes 5635 * 5636 * Returns: size of formatted string. 
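 *
 * The value is fixed at module load time (the module parameter is 0444) and
 * can only be read back through this attribute, e.g.
 * "cat /sys/class/scsi_host/host0/lpfc_irq_chann" (host number illustrative
 * only).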
5637 **/ 5638 static ssize_t 5639 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, 5640 char *buf) 5641 { 5642 struct Scsi_Host *shost = class_to_shost(dev); 5643 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5644 struct lpfc_hba *phba = vport->phba; 5645 5646 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); 5647 } 5648 5649 static DEVICE_ATTR_RO(lpfc_irq_chann); 5650 5651 /* 5652 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 5653 # 0 = HBA resets disabled 5654 # 1 = HBA resets enabled (default) 5655 # 2 = HBA reset via PCI bus reset enabled 5656 # Value range is [0,2]. Default value is 1. 5657 */ 5658 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver."); 5659 5660 /* 5661 # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. 5662 # 0 = HBA Heartbeat disabled 5663 # 1 = HBA Heartbeat enabled (default) 5664 # Value range is [0,1]. Default value is 1. 5665 */ 5666 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); 5667 5668 /* 5669 # lpfc_EnableXLane: Enable Express Lane Feature 5670 # 0x0 Express Lane Feature disabled 5671 # 0x1 Express Lane Feature enabled 5672 # Value range is [0,1]. Default value is 0. 5673 */ 5674 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); 5675 5676 /* 5677 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature 5678 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) 5679 # Value range is [0x0,0x7f]. Default value is 0 5680 */ 5681 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); 5682 5683 /* 5684 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 5685 # 0 = BlockGuard disabled (default) 5686 # 1 = BlockGuard enabled 5687 # Value range is [0,1]. Default value is 0. 5688 */ 5689 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 5690 5691 /* 5692 # lpfc_prot_mask: 5693 # - Bit mask of host protection capabilities used to register with the 5694 # SCSI mid-layer 5695 # - Only meaningful if BG is turned on (lpfc_enable_bg=1). 5696 # - Allows you to ultimately specify which profiles to use 5697 # - Default will result in registering capabilities for all profiles. 5698 # - SHOST_DIF_TYPE1_PROTECTION 1 5699 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection 5700 # - SHOST_DIX_TYPE0_PROTECTION 8 5701 # HBA supports DIX Type 0: Host to HBA protection only 5702 # - SHOST_DIX_TYPE1_PROTECTION 16 5703 # HBA supports DIX Type 1: Host to HBA Type 1 protection 5704 # 5705 */ 5706 LPFC_ATTR(prot_mask, 5707 (SHOST_DIF_TYPE1_PROTECTION | 5708 SHOST_DIX_TYPE0_PROTECTION | 5709 SHOST_DIX_TYPE1_PROTECTION), 5710 0, 5711 (SHOST_DIF_TYPE1_PROTECTION | 5712 SHOST_DIX_TYPE0_PROTECTION | 5713 SHOST_DIX_TYPE1_PROTECTION), 5714 "T10-DIF host protection capabilities mask"); 5715 5716 /* 5717 # lpfc_prot_guard: 5718 # - Bit mask of protection guard types to register with the SCSI mid-layer 5719 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum 5720 # - Allows you to ultimately specify which profiles to use 5721 # - Default will result in registering capabilities for all guard types 5722 # 5723 */ 5724 LPFC_ATTR(prot_guard, 5725 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, 5726 "T10-DIF host protection guard type"); 5727 5728 /* 5729 * Delay initial NPort discovery when Clean Address bit is cleared in 5730 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. 5731 * This parameter can have value 0 or 1. 
5732 * When this parameter is set to 0, no delay is added to the initial 5733 * discovery. 5734 * When this parameter is set to non-zero value, initial Nport discovery is 5735 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC 5736 * accept and FCID/Fabric name/Fabric portname is changed. 5737 * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion 5738 * when Clean Address bit is cleared in FLOGI/FDISC 5739 * accept and FCID/Fabric name/Fabric portname is changed. 5740 * Default value is 0. 5741 */ 5742 LPFC_ATTR(delay_discovery, 0, 0, 1, 5743 "Delay NPort discovery when Clean Address bit is cleared."); 5744 5745 /* 5746 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count 5747 * This value can be set to values between 64 and 4096. The default value 5748 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi 5749 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE). 5750 * Because of the additional overhead involved in setting up T10-DIF, 5751 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 5752 * and will be limited to 512 if BlockGuard is enabled under SLI3. 5753 */ 5754 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 5755 module_param(lpfc_sg_seg_cnt, uint, 0444); 5756 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count"); 5757 5758 /** 5759 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes 5760 * configured for the adapter 5761 * @dev: class converted to a Scsi_host structure. 5762 * @attr: device attribute, not used. 5763 * @buf: on return contains a string with the list sizes 5764 * 5765 * Returns: size of formatted string. 5766 **/ 5767 static ssize_t 5768 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, 5769 char *buf) 5770 { 5771 struct Scsi_Host *shost = class_to_shost(dev); 5772 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5773 struct lpfc_hba *phba = vport->phba; 5774 int len; 5775 5776 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 5777 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 5778 5779 len += scnprintf(buf + len, PAGE_SIZE - len, 5780 "Cfg: %d SCSI: %d NVME: %d\n", 5781 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 5782 phba->cfg_nvme_seg_cnt); 5783 return len; 5784 } 5785 5786 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); 5787 5788 /** 5789 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value 5790 * @phba: lpfc_hba pointer. 5791 * @val: contains the initial value 5792 * 5793 * Description: 5794 * Validates the initial value is within range and assigns it to the 5795 * adapter. If not in range, an error message is posted and the 5796 * default value is assigned. 5797 * 5798 * Returns: 5799 * zero if value is in range and is set 5800 * -EINVAL if value was out of range 5801 **/ 5802 static int 5803 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) 5804 { 5805 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) { 5806 phba->cfg_sg_seg_cnt = val; 5807 return 0; 5808 } 5809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5810 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, " 5811 "allowed range is [%d, %d]\n", 5812 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT); 5813 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 5814 return -EINVAL; 5815 } 5816 5817 /* 5818 * lpfc_enable_mds_diags: Enable MDS Diagnostics 5819 * 0 = MDS Diagnostics disabled (default) 5820 * 1 = MDS Diagnostics enabled 5821 * Value range is [0,1]. Default value is 0. 
5822 */ 5823 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); 5824 5825 /* 5826 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size 5827 * 0 = Disable firmware logging (default) 5828 * [1-4] = Multiple of 1/4th Mb of host memory for FW logging 5829 * Value range [0..4]. Default value is 0 5830 */ 5831 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); 5832 lpfc_param_show(ras_fwlog_buffsize); 5833 5834 static ssize_t 5835 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) 5836 { 5837 int ret = 0; 5838 enum ras_state state; 5839 5840 if (!lpfc_rangecheck(val, 0, 4)) 5841 return -EINVAL; 5842 5843 if (phba->cfg_ras_fwlog_buffsize == val) 5844 return 0; 5845 5846 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) 5847 return -EINVAL; 5848 5849 spin_lock_irq(&phba->hbalock); 5850 state = phba->ras_fwlog.state; 5851 spin_unlock_irq(&phba->hbalock); 5852 5853 if (state == REG_INPROGRESS) { 5854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " 5855 "registration is in progress\n"); 5856 return -EBUSY; 5857 } 5858 5859 /* For disable logging: stop the logs and free the DMA. 5860 * For ras_fwlog_buffsize size change we still need to free and 5861 * reallocate the DMA in lpfc_sli4_ras_fwlog_init. 5862 */ 5863 phba->cfg_ras_fwlog_buffsize = val; 5864 if (state == ACTIVE) { 5865 lpfc_ras_stop_fwlog(phba); 5866 lpfc_sli4_ras_dma_free(phba); 5867 } 5868 5869 lpfc_sli4_ras_init(phba); 5870 if (phba->ras_fwlog.ras_enabled) 5871 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 5872 LPFC_RAS_ENABLE_LOGGING); 5873 return ret; 5874 } 5875 5876 lpfc_param_store(ras_fwlog_buffsize); 5877 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); 5878 5879 /* 5880 * lpfc_ras_fwlog_level: Firmware logging verbosity level 5881 * Valid only if firmware logging is enabled 5882 * 0(Least Verbosity) 4 (most verbosity) 5883 * Value range is [0..4]. Default value is 0 5884 */ 5885 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level"); 5886 5887 /* 5888 * lpfc_ras_fwlog_func: Firmware logging enabled on function number 5889 * Default function which has RAS support : 0 5890 * Value Range is [0..7]. 5891 * FW logging is a global action and enablement is via a specific 5892 * port. 5893 */ 5894 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); 5895 5896 /* 5897 * lpfc_enable_bbcr: Enable BB Credit Recovery 5898 * 0 = BB Credit Recovery disabled 5899 * 1 = BB Credit Recovery enabled (default) 5900 * Value range is [0,1]. Default value is 1. 5901 */ 5902 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); 5903 5904 /* Signaling module parameters */ 5905 int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ 5906 module_param(lpfc_fabric_cgn_frequency, int, 0444); 5907 MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); 5908 5909 int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ 5910 module_param(lpfc_acqe_cgn_frequency, int, 0444); 5911 MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); 5912 5913 int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ 5914 module_param(lpfc_use_cgn_signal, int, 0444); 5915 MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available"); 5916 5917 /* 5918 * lpfc_enable_dpp: Enable DPP on G7 5919 * 0 = DPP on G7 disabled 5920 * 1 = DPP on G7 enabled (default) 5921 * Value range is [0,1]. Default value is 1. 
5922 */ 5923 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); 5924 5925 /* 5926 * lpfc_enable_mi: Enable FDMI MIB 5927 * 0 = disabled 5928 * 1 = enabled (default) 5929 * Value range is [0,1]. 5930 */ 5931 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI"); 5932 5933 /* 5934 * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if 5935 * either vmid_app_header or vmid_priority_tagging is enabled. 5936 * 4 - 255 = vmid support enabled for 4-255 VMs 5937 * Value range is [4,255]. 5938 */ 5939 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID, 5940 "Maximum number of VMs supported"); 5941 5942 /* 5943 * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours 5944 * 0 = Timeout is disabled 5945 * Value range is [0,24]. 5946 */ 5947 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24, 5948 "Inactivity timeout in hours"); 5949 5950 /* 5951 * lpfc_vmid_app_header: Enable App Header VMID support 5952 * 0 = Support is disabled (default) 5953 * 1 = Support is enabled 5954 * Value range is [0,1]. 5955 */ 5956 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE, 5957 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE, 5958 "Enable App Header VMID support"); 5959 5960 /* 5961 * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support 5962 * 0 = Support is disabled (default) 5963 * 1 = Allow supported targets only 5964 * 2 = Allow all targets 5965 * Value range is [0,2]. 5966 */ 5967 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE, 5968 LPFC_VMID_PRIO_TAG_DISABLE, 5969 LPFC_VMID_PRIO_TAG_ALL_TARGETS, 5970 "Enable Priority Tagging VMID support"); 5971 5972 static struct attribute *lpfc_hba_attrs[] = { 5973 &dev_attr_nvme_info.attr, 5974 &dev_attr_scsi_stat.attr, 5975 &dev_attr_bg_info.attr, 5976 &dev_attr_bg_guard_err.attr, 5977 &dev_attr_bg_apptag_err.attr, 5978 &dev_attr_bg_reftag_err.attr, 5979 &dev_attr_info.attr, 5980 &dev_attr_serialnum.attr, 5981 &dev_attr_modeldesc.attr, 5982 &dev_attr_modelname.attr, 5983 &dev_attr_programtype.attr, 5984 &dev_attr_portnum.attr, 5985 &dev_attr_fwrev.attr, 5986 &dev_attr_hdw.attr, 5987 &dev_attr_option_rom_version.attr, 5988 &dev_attr_link_state.attr, 5989 &dev_attr_num_discovered_ports.attr, 5990 &dev_attr_lpfc_drvr_version.attr, 5991 &dev_attr_lpfc_enable_fip.attr, 5992 &dev_attr_lpfc_temp_sensor.attr, 5993 &dev_attr_lpfc_log_verbose.attr, 5994 &dev_attr_lpfc_lun_queue_depth.attr, 5995 &dev_attr_lpfc_tgt_queue_depth.attr, 5996 &dev_attr_lpfc_hba_queue_depth.attr, 5997 &dev_attr_lpfc_peer_port_login.attr, 5998 &dev_attr_lpfc_nodev_tmo.attr, 5999 &dev_attr_lpfc_devloss_tmo.attr, 6000 &dev_attr_lpfc_enable_fc4_type.attr, 6001 &dev_attr_lpfc_fcp_class.attr, 6002 &dev_attr_lpfc_use_adisc.attr, 6003 &dev_attr_lpfc_first_burst_size.attr, 6004 &dev_attr_lpfc_ack0.attr, 6005 &dev_attr_lpfc_xri_rebalancing.attr, 6006 &dev_attr_lpfc_topology.attr, 6007 &dev_attr_lpfc_scan_down.attr, 6008 &dev_attr_lpfc_link_speed.attr, 6009 &dev_attr_lpfc_fcp_io_sched.attr, 6010 &dev_attr_lpfc_ns_query.attr, 6011 &dev_attr_lpfc_fcp2_no_tgt_reset.attr, 6012 &dev_attr_lpfc_cr_delay.attr, 6013 &dev_attr_lpfc_cr_count.attr, 6014 &dev_attr_lpfc_multi_ring_support.attr, 6015 &dev_attr_lpfc_multi_ring_rctl.attr, 6016 &dev_attr_lpfc_multi_ring_type.attr, 6017 &dev_attr_lpfc_fdmi_on.attr, 6018 &dev_attr_lpfc_enable_SmartSAN.attr, 6019 &dev_attr_lpfc_max_luns.attr, 6020 &dev_attr_lpfc_enable_npiv.attr, 6021 &dev_attr_lpfc_fcf_failover_policy.attr, 6022 &dev_attr_lpfc_enable_rrq.attr, 6023 
&dev_attr_lpfc_fcp_wait_abts_rsp.attr, 6024 &dev_attr_nport_evt_cnt.attr, 6025 &dev_attr_board_mode.attr, 6026 &dev_attr_lpfc_xcvr_data.attr, 6027 &dev_attr_max_vpi.attr, 6028 &dev_attr_used_vpi.attr, 6029 &dev_attr_max_rpi.attr, 6030 &dev_attr_used_rpi.attr, 6031 &dev_attr_max_xri.attr, 6032 &dev_attr_used_xri.attr, 6033 &dev_attr_npiv_info.attr, 6034 &dev_attr_issue_reset.attr, 6035 &dev_attr_lpfc_poll.attr, 6036 &dev_attr_lpfc_poll_tmo.attr, 6037 &dev_attr_lpfc_task_mgmt_tmo.attr, 6038 &dev_attr_lpfc_use_msi.attr, 6039 &dev_attr_lpfc_nvme_oas.attr, 6040 &dev_attr_lpfc_nvme_embed_cmd.attr, 6041 &dev_attr_lpfc_fcp_imax.attr, 6042 &dev_attr_lpfc_force_rscn.attr, 6043 &dev_attr_lpfc_cq_poll_threshold.attr, 6044 &dev_attr_lpfc_cq_max_proc_limit.attr, 6045 &dev_attr_lpfc_fcp_cpu_map.attr, 6046 &dev_attr_lpfc_fcp_mq_threshold.attr, 6047 &dev_attr_lpfc_hdw_queue.attr, 6048 &dev_attr_lpfc_irq_chann.attr, 6049 &dev_attr_lpfc_suppress_rsp.attr, 6050 &dev_attr_lpfc_nvmet_mrq.attr, 6051 &dev_attr_lpfc_nvmet_mrq_post.attr, 6052 &dev_attr_lpfc_nvme_enable_fb.attr, 6053 &dev_attr_lpfc_nvmet_fb_size.attr, 6054 &dev_attr_lpfc_enable_bg.attr, 6055 &dev_attr_lpfc_enable_hba_reset.attr, 6056 &dev_attr_lpfc_enable_hba_heartbeat.attr, 6057 &dev_attr_lpfc_EnableXLane.attr, 6058 &dev_attr_lpfc_XLanePriority.attr, 6059 &dev_attr_lpfc_xlane_lun.attr, 6060 &dev_attr_lpfc_xlane_tgt.attr, 6061 &dev_attr_lpfc_xlane_vpt.attr, 6062 &dev_attr_lpfc_xlane_lun_state.attr, 6063 &dev_attr_lpfc_xlane_lun_status.attr, 6064 &dev_attr_lpfc_xlane_priority.attr, 6065 &dev_attr_lpfc_sg_seg_cnt.attr, 6066 &dev_attr_lpfc_max_scsicmpl_time.attr, 6067 &dev_attr_lpfc_aer_support.attr, 6068 &dev_attr_lpfc_aer_state_cleanup.attr, 6069 &dev_attr_lpfc_sriov_nr_virtfn.attr, 6070 &dev_attr_lpfc_req_fw_upgrade.attr, 6071 &dev_attr_lpfc_suppress_link_up.attr, 6072 &dev_attr_iocb_hw.attr, 6073 &dev_attr_pls.attr, 6074 &dev_attr_pt.attr, 6075 &dev_attr_txq_hw.attr, 6076 &dev_attr_txcmplq_hw.attr, 6077 &dev_attr_lpfc_sriov_hw_max_virtfn.attr, 6078 &dev_attr_protocol.attr, 6079 &dev_attr_lpfc_xlane_supported.attr, 6080 &dev_attr_lpfc_enable_mds_diags.attr, 6081 &dev_attr_lpfc_ras_fwlog_buffsize.attr, 6082 &dev_attr_lpfc_ras_fwlog_level.attr, 6083 &dev_attr_lpfc_ras_fwlog_func.attr, 6084 &dev_attr_lpfc_enable_bbcr.attr, 6085 &dev_attr_lpfc_enable_dpp.attr, 6086 &dev_attr_lpfc_enable_mi.attr, 6087 &dev_attr_cmf_info.attr, 6088 &dev_attr_lpfc_max_vmid.attr, 6089 &dev_attr_lpfc_vmid_inactivity_timeout.attr, 6090 &dev_attr_lpfc_vmid_app_header.attr, 6091 &dev_attr_lpfc_vmid_priority_tagging.attr, 6092 NULL, 6093 }; 6094 6095 static const struct attribute_group lpfc_hba_attr_group = { 6096 .attrs = lpfc_hba_attrs 6097 }; 6098 6099 const struct attribute_group *lpfc_hba_groups[] = { 6100 &lpfc_hba_attr_group, 6101 NULL 6102 }; 6103 6104 static struct attribute *lpfc_vport_attrs[] = { 6105 &dev_attr_info.attr, 6106 &dev_attr_link_state.attr, 6107 &dev_attr_num_discovered_ports.attr, 6108 &dev_attr_lpfc_drvr_version.attr, 6109 &dev_attr_lpfc_log_verbose.attr, 6110 &dev_attr_lpfc_lun_queue_depth.attr, 6111 &dev_attr_lpfc_tgt_queue_depth.attr, 6112 &dev_attr_lpfc_nodev_tmo.attr, 6113 &dev_attr_lpfc_devloss_tmo.attr, 6114 &dev_attr_lpfc_hba_queue_depth.attr, 6115 &dev_attr_lpfc_peer_port_login.attr, 6116 &dev_attr_lpfc_restrict_login.attr, 6117 &dev_attr_lpfc_fcp_class.attr, 6118 &dev_attr_lpfc_use_adisc.attr, 6119 &dev_attr_lpfc_first_burst_size.attr, 6120 &dev_attr_lpfc_max_luns.attr, 6121 &dev_attr_nport_evt_cnt.attr, 6122 &dev_attr_npiv_info.attr, 6123 
&dev_attr_lpfc_enable_da_id.attr, 6124 &dev_attr_lpfc_max_scsicmpl_time.attr, 6125 &dev_attr_lpfc_static_vport.attr, 6126 &dev_attr_cmf_info.attr, 6127 NULL, 6128 }; 6129 6130 static const struct attribute_group lpfc_vport_attr_group = { 6131 .attrs = lpfc_vport_attrs 6132 }; 6133 6134 const struct attribute_group *lpfc_vport_groups[] = { 6135 &lpfc_vport_attr_group, 6136 NULL 6137 }; 6138 6139 /** 6140 * sysfs_ctlreg_write - Write method for writing to ctlreg 6141 * @filp: open sysfs file 6142 * @kobj: kernel kobject that contains the kernel class device. 6143 * @bin_attr: kernel attributes passed to us. 6144 * @buf: contains the data to be written to the adapter IOREG space. 6145 * @off: offset into buffer to beginning of data. 6146 * @count: bytes to transfer. 6147 * 6148 * Description: 6149 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6150 * Uses the adapter io control registers to send buf contents to the adapter. 6151 * 6152 * Returns: 6153 * -ERANGE off and count combo out of range 6154 * -EINVAL off, count or buff address invalid 6155 * -EPERM adapter is offline 6156 * value of count, buf contents written 6157 **/ 6158 static ssize_t 6159 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, 6160 struct bin_attribute *bin_attr, 6161 char *buf, loff_t off, size_t count) 6162 { 6163 size_t buf_off; 6164 struct device *dev = container_of(kobj, struct device, kobj); 6165 struct Scsi_Host *shost = class_to_shost(dev); 6166 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6167 struct lpfc_hba *phba = vport->phba; 6168 6169 if (phba->sli_rev >= LPFC_SLI_REV4) 6170 return -EPERM; 6171 6172 if ((off + count) > FF_REG_AREA_SIZE) 6173 return -ERANGE; 6174 6175 if (count <= LPFC_REG_WRITE_KEY_SIZE) 6176 return 0; 6177 6178 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6179 return -EINVAL; 6180 6181 /* This is to protect HBA registers from accidental writes. */ 6182 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) 6183 return -EINVAL; 6184 6185 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 6186 return -EPERM; 6187 6188 spin_lock_irq(&phba->hbalock); 6189 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; 6190 buf_off += sizeof(uint32_t)) 6191 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), 6192 phba->ctrl_regs_memmap_p + off + buf_off); 6193 6194 spin_unlock_irq(&phba->hbalock); 6195 6196 return count; 6197 } 6198 6199 /** 6200 * sysfs_ctlreg_read - Read method for reading from ctlreg 6201 * @filp: open sysfs file 6202 * @kobj: kernel kobject that contains the kernel class device. 6203 * @bin_attr: kernel attributes passed to us. 6204 * @buf: if successful contains the data from the adapter IOREG space. 6205 * @off: offset into buffer to beginning of data. 6206 * @count: bytes to transfer. 6207 * 6208 * Description: 6209 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6210 * Uses the adapter io control registers to read data into buf. 
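 * Offsets and byte counts must be multiples of 4 and fall within the
 * exported register area (the binary attribute is 256 bytes). For example
 * (illustrative only), reading 8 bytes at offset 0 from
 * /sys/class/scsi_host/host0/ctlreg returns the first two 32-bit control
 * register words.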
6211 * 6212 * Returns: 6213 * -ERANGE off and count combo out of range 6214 * -EINVAL off, count or buff address invalid 6215 * value of count, buf contents read 6216 **/ 6217 static ssize_t 6218 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, 6219 struct bin_attribute *bin_attr, 6220 char *buf, loff_t off, size_t count) 6221 { 6222 size_t buf_off; 6223 uint32_t * tmp_ptr; 6224 struct device *dev = container_of(kobj, struct device, kobj); 6225 struct Scsi_Host *shost = class_to_shost(dev); 6226 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6227 struct lpfc_hba *phba = vport->phba; 6228 6229 if (phba->sli_rev >= LPFC_SLI_REV4) 6230 return -EPERM; 6231 6232 if (off > FF_REG_AREA_SIZE) 6233 return -ERANGE; 6234 6235 if ((off + count) > FF_REG_AREA_SIZE) 6236 count = FF_REG_AREA_SIZE - off; 6237 6238 if (count == 0) return 0; 6239 6240 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6241 return -EINVAL; 6242 6243 spin_lock_irq(&phba->hbalock); 6244 6245 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { 6246 tmp_ptr = (uint32_t *)(buf + buf_off); 6247 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); 6248 } 6249 6250 spin_unlock_irq(&phba->hbalock); 6251 6252 return count; 6253 } 6254 6255 static struct bin_attribute sysfs_ctlreg_attr = { 6256 .attr = { 6257 .name = "ctlreg", 6258 .mode = S_IRUSR | S_IWUSR, 6259 }, 6260 .size = 256, 6261 .read = sysfs_ctlreg_read, 6262 .write = sysfs_ctlreg_write, 6263 }; 6264 6265 /** 6266 * sysfs_mbox_write - Write method for writing information via mbox 6267 * @filp: open sysfs file 6268 * @kobj: kernel kobject that contains the kernel class device. 6269 * @bin_attr: kernel attributes passed to us. 6270 * @buf: contains the data to be written to sysfs mbox. 6271 * @off: offset into buffer to beginning of data. 6272 * @count: bytes to transfer. 6273 * 6274 * Description: 6275 * Deprecated function. All mailbox access from user space is performed via the 6276 * bsg interface. 6277 * 6278 * Returns: 6279 * -EPERM operation not permitted 6280 **/ 6281 static ssize_t 6282 sysfs_mbox_write(struct file *filp, struct kobject *kobj, 6283 struct bin_attribute *bin_attr, 6284 char *buf, loff_t off, size_t count) 6285 { 6286 return -EPERM; 6287 } 6288 6289 /** 6290 * sysfs_mbox_read - Read method for reading information via mbox 6291 * @filp: open sysfs file 6292 * @kobj: kernel kobject that contains the kernel class device. 6293 * @bin_attr: kernel attributes passed to us. 6294 * @buf: contains the data to be read from sysfs mbox. 6295 * @off: offset into buffer to beginning of data. 6296 * @count: bytes to transfer. 6297 * 6298 * Description: 6299 * Deprecated function. All mailbox access from user space is performed via the 6300 * bsg interface. 6301 * 6302 * Returns: 6303 * -EPERM operation not permitted 6304 **/ 6305 static ssize_t 6306 sysfs_mbox_read(struct file *filp, struct kobject *kobj, 6307 struct bin_attribute *bin_attr, 6308 char *buf, loff_t off, size_t count) 6309 { 6310 return -EPERM; 6311 } 6312 6313 static struct bin_attribute sysfs_mbox_attr = { 6314 .attr = { 6315 .name = "mbox", 6316 .mode = S_IRUSR | S_IWUSR, 6317 }, 6318 .size = MAILBOX_SYSFS_MAX, 6319 .read = sysfs_mbox_read, 6320 .write = sysfs_mbox_write, 6321 }; 6322 6323 /** 6324 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries 6325 * @vport: address of lpfc vport structure. 
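 *
 * Description:
 * Creates the "ctlreg" and "mbox" binary files under the Scsi_Host sysfs
 * device for physical ports; NPIV vports are skipped because they do not
 * need these entries.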
6326 * 6327 * Return codes: 6328 * zero on success 6329 * error return code from sysfs_create_bin_file() 6330 **/ 6331 int 6332 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) 6333 { 6334 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6335 int error; 6336 6337 /* Virtual ports do not need ctrl_reg and mbox */ 6338 if (vport->port_type == LPFC_NPIV_PORT) 6339 return 0; 6340 6341 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6342 &sysfs_ctlreg_attr); 6343 if (error) 6344 goto out; 6345 6346 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6347 &sysfs_mbox_attr); 6348 if (error) 6349 goto out_remove_ctlreg_attr; 6350 6351 return 0; 6352 out_remove_ctlreg_attr: 6353 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6354 out: 6355 return error; 6356 } 6357 6358 /** 6359 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries 6360 * @vport: address of lpfc vport structure. 6361 **/ 6362 void 6363 lpfc_free_sysfs_attr(struct lpfc_vport *vport) 6364 { 6365 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6366 6367 /* Virtual ports do not need ctrl_reg and mbox */ 6368 if (vport->port_type == LPFC_NPIV_PORT) 6369 return; 6370 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 6371 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6372 } 6373 6374 /* 6375 * Dynamic FC Host Attributes Support 6376 */ 6377 6378 /** 6379 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host 6380 * @shost: kernel scsi host pointer. 6381 **/ 6382 static void 6383 lpfc_get_host_symbolic_name(struct Scsi_Host *shost) 6384 { 6385 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 6386 6387 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 6388 sizeof fc_host_symbolic_name(shost)); 6389 } 6390 6391 /** 6392 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id 6393 * @shost: kernel scsi host pointer. 6394 **/ 6395 static void 6396 lpfc_get_host_port_id(struct Scsi_Host *shost) 6397 { 6398 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6399 6400 /* note: fc_myDID already in cpu endianness */ 6401 fc_host_port_id(shost) = vport->fc_myDID; 6402 } 6403 6404 /** 6405 * lpfc_get_host_port_type - Set the value of the scsi host port type 6406 * @shost: kernel scsi host pointer. 6407 **/ 6408 static void 6409 lpfc_get_host_port_type(struct Scsi_Host *shost) 6410 { 6411 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6412 struct lpfc_hba *phba = vport->phba; 6413 6414 spin_lock_irq(shost->host_lock); 6415 6416 if (vport->port_type == LPFC_NPIV_PORT) { 6417 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 6418 } else if (lpfc_is_link_up(phba)) { 6419 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6420 if (vport->fc_flag & FC_PUBLIC_LOOP) 6421 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 6422 else 6423 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 6424 } else { 6425 if (vport->fc_flag & FC_FABRIC) 6426 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 6427 else 6428 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 6429 } 6430 } else 6431 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 6432 6433 spin_unlock_irq(shost->host_lock); 6434 } 6435 6436 /** 6437 * lpfc_get_host_port_state - Set the value of the scsi host port state 6438 * @shost: kernel scsi host pointer. 
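 *
 * Description:
 * Maps the vport type, the driver link_state and the vport port_state to
 * the corresponding FC transport port state (NPIV, LINKDOWN, BYPASSED,
 * ONLINE, ERROR or UNKNOWN).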
6439 **/ 6440 static void 6441 lpfc_get_host_port_state(struct Scsi_Host *shost) 6442 { 6443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6444 struct lpfc_hba *phba = vport->phba; 6445 6446 spin_lock_irq(shost->host_lock); 6447 6448 if (vport->fc_flag & FC_OFFLINE_MODE) 6449 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 6450 else { 6451 switch (phba->link_state) { 6452 case LPFC_LINK_UNKNOWN: 6453 case LPFC_LINK_DOWN: 6454 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 6455 break; 6456 case LPFC_LINK_UP: 6457 case LPFC_CLEAR_LA: 6458 case LPFC_HBA_READY: 6459 /* Links up, reports port state accordingly */ 6460 if (vport->port_state < LPFC_VPORT_READY) 6461 fc_host_port_state(shost) = 6462 FC_PORTSTATE_BYPASSED; 6463 else 6464 fc_host_port_state(shost) = 6465 FC_PORTSTATE_ONLINE; 6466 break; 6467 case LPFC_HBA_ERROR: 6468 fc_host_port_state(shost) = FC_PORTSTATE_ERROR; 6469 break; 6470 default: 6471 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 6472 break; 6473 } 6474 } 6475 6476 spin_unlock_irq(shost->host_lock); 6477 } 6478 6479 /** 6480 * lpfc_get_host_speed - Set the value of the scsi host speed 6481 * @shost: kernel scsi host pointer. 6482 **/ 6483 static void 6484 lpfc_get_host_speed(struct Scsi_Host *shost) 6485 { 6486 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6487 struct lpfc_hba *phba = vport->phba; 6488 6489 spin_lock_irq(shost->host_lock); 6490 6491 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { 6492 switch(phba->fc_linkspeed) { 6493 case LPFC_LINK_SPEED_1GHZ: 6494 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 6495 break; 6496 case LPFC_LINK_SPEED_2GHZ: 6497 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 6498 break; 6499 case LPFC_LINK_SPEED_4GHZ: 6500 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 6501 break; 6502 case LPFC_LINK_SPEED_8GHZ: 6503 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 6504 break; 6505 case LPFC_LINK_SPEED_10GHZ: 6506 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6507 break; 6508 case LPFC_LINK_SPEED_16GHZ: 6509 fc_host_speed(shost) = FC_PORTSPEED_16GBIT; 6510 break; 6511 case LPFC_LINK_SPEED_32GHZ: 6512 fc_host_speed(shost) = FC_PORTSPEED_32GBIT; 6513 break; 6514 case LPFC_LINK_SPEED_64GHZ: 6515 fc_host_speed(shost) = FC_PORTSPEED_64GBIT; 6516 break; 6517 case LPFC_LINK_SPEED_128GHZ: 6518 fc_host_speed(shost) = FC_PORTSPEED_128GBIT; 6519 break; 6520 case LPFC_LINK_SPEED_256GHZ: 6521 fc_host_speed(shost) = FC_PORTSPEED_256GBIT; 6522 break; 6523 default: 6524 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6525 break; 6526 } 6527 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { 6528 switch (phba->fc_linkspeed) { 6529 case LPFC_ASYNC_LINK_SPEED_1GBPS: 6530 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 6531 break; 6532 case LPFC_ASYNC_LINK_SPEED_10GBPS: 6533 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6534 break; 6535 case LPFC_ASYNC_LINK_SPEED_20GBPS: 6536 fc_host_speed(shost) = FC_PORTSPEED_20GBIT; 6537 break; 6538 case LPFC_ASYNC_LINK_SPEED_25GBPS: 6539 fc_host_speed(shost) = FC_PORTSPEED_25GBIT; 6540 break; 6541 case LPFC_ASYNC_LINK_SPEED_40GBPS: 6542 fc_host_speed(shost) = FC_PORTSPEED_40GBIT; 6543 break; 6544 case LPFC_ASYNC_LINK_SPEED_100GBPS: 6545 fc_host_speed(shost) = FC_PORTSPEED_100GBIT; 6546 break; 6547 default: 6548 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6549 break; 6550 } 6551 } else 6552 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6553 6554 spin_unlock_irq(shost->host_lock); 6555 } 6556 6557 /** 6558 * lpfc_get_host_fabric_name - Set the value of the scsi 
host fabric name 6559 * @shost: kernel scsi host pointer. 6560 **/ 6561 static void 6562 lpfc_get_host_fabric_name (struct Scsi_Host *shost) 6563 { 6564 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6565 struct lpfc_hba *phba = vport->phba; 6566 u64 node_name; 6567 6568 spin_lock_irq(shost->host_lock); 6569 6570 if ((vport->port_state > LPFC_FLOGI) && 6571 ((vport->fc_flag & FC_FABRIC) || 6572 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 6573 (vport->fc_flag & FC_PUBLIC_LOOP)))) 6574 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 6575 else 6576 /* fabric is local port if there is no F/FL_Port */ 6577 node_name = 0; 6578 6579 spin_unlock_irq(shost->host_lock); 6580 6581 fc_host_fabric_name(shost) = node_name; 6582 } 6583 6584 /** 6585 * lpfc_get_stats - Return statistical information about the adapter 6586 * @shost: kernel scsi host pointer. 6587 * 6588 * Notes: 6589 * NULL on error for link down, no mbox pool, sli2 active, 6590 * management not allowed, memory allocation error, or mbox error. 6591 * 6592 * Returns: 6593 * NULL for error 6594 * address of the adapter host statistics 6595 **/ 6596 static struct fc_host_statistics * 6597 lpfc_get_stats(struct Scsi_Host *shost) 6598 { 6599 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6600 struct lpfc_hba *phba = vport->phba; 6601 struct lpfc_sli *psli = &phba->sli; 6602 struct fc_host_statistics *hs = &phba->link_stats; 6603 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 6604 LPFC_MBOXQ_t *pmboxq; 6605 MAILBOX_t *pmb; 6606 int rc = 0; 6607 6608 /* 6609 * prevent udev from issuing mailbox commands until the port is 6610 * configured. 6611 */ 6612 if (phba->link_state < LPFC_LINK_DOWN || 6613 !phba->mbox_mem_pool || 6614 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 6615 return NULL; 6616 6617 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6618 return NULL; 6619 6620 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6621 if (!pmboxq) 6622 return NULL; 6623 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6624 6625 pmb = &pmboxq->u.mb; 6626 pmb->mbxCommand = MBX_READ_STATUS; 6627 pmb->mbxOwner = OWN_HOST; 6628 pmboxq->ctx_buf = NULL; 6629 pmboxq->vport = vport; 6630 6631 if (vport->fc_flag & FC_OFFLINE_MODE) { 6632 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6633 if (rc != MBX_SUCCESS) { 6634 mempool_free(pmboxq, phba->mbox_mem_pool); 6635 return NULL; 6636 } 6637 } else { 6638 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6639 if (rc != MBX_SUCCESS) { 6640 if (rc != MBX_TIMEOUT) 6641 mempool_free(pmboxq, phba->mbox_mem_pool); 6642 return NULL; 6643 } 6644 } 6645 6646 memset(hs, 0, sizeof (struct fc_host_statistics)); 6647 6648 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; 6649 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; 6650 6651 /* 6652 * The MBX_READ_STATUS returns tx_k_bytes which has to be 6653 * converted to words. 6654 * 6655 * Check if extended byte flag is set, to know when to collect upper 6656 * bits of 64 bit wide statistics counter. 
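 *
 * For example, with the extended flag set, xmit_xkb = 0x1 and
 * xmitByteCnt = 16 describe (1 << 32) + 16 = 4294967312 kilobytes
 * transmitted; the code below reports this as 4294967312 * 256 words,
 * since one FC word is 4 bytes and 1 KB therefore holds 256 words.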
6657 */ 6658 if (pmb->un.varRdStatus.xkb & RD_ST_XKB) { 6659 hs->tx_words = (u64) 6660 ((((u64)(pmb->un.varRdStatus.xmit_xkb & 6661 RD_ST_XMIT_XKB_MASK) << 32) | 6662 (u64)pmb->un.varRdStatus.xmitByteCnt) * 6663 (u64)256); 6664 hs->rx_words = (u64) 6665 ((((u64)(pmb->un.varRdStatus.rcv_xkb & 6666 RD_ST_RCV_XKB_MASK) << 32) | 6667 (u64)pmb->un.varRdStatus.rcvByteCnt) * 6668 (u64)256); 6669 } else { 6670 hs->tx_words = (uint64_t) 6671 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt 6672 * (uint64_t)256); 6673 hs->rx_words = (uint64_t) 6674 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt 6675 * (uint64_t)256); 6676 } 6677 6678 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6679 pmb->mbxCommand = MBX_READ_LNK_STAT; 6680 pmb->mbxOwner = OWN_HOST; 6681 pmboxq->ctx_buf = NULL; 6682 pmboxq->vport = vport; 6683 6684 if (vport->fc_flag & FC_OFFLINE_MODE) { 6685 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6686 if (rc != MBX_SUCCESS) { 6687 mempool_free(pmboxq, phba->mbox_mem_pool); 6688 return NULL; 6689 } 6690 } else { 6691 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6692 if (rc != MBX_SUCCESS) { 6693 if (rc != MBX_TIMEOUT) 6694 mempool_free(pmboxq, phba->mbox_mem_pool); 6695 return NULL; 6696 } 6697 } 6698 6699 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 6700 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 6701 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 6702 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 6703 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6704 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6705 hs->error_frames = pmb->un.varRdLnk.crcCnt; 6706 6707 hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn); 6708 hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm); 6709 6710 hs->link_failure_count -= lso->link_failure_count; 6711 hs->loss_of_sync_count -= lso->loss_of_sync_count; 6712 hs->loss_of_signal_count -= lso->loss_of_signal_count; 6713 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; 6714 hs->invalid_tx_word_count -= lso->invalid_tx_word_count; 6715 hs->invalid_crc_count -= lso->invalid_crc_count; 6716 hs->error_frames -= lso->error_frames; 6717 6718 if (phba->hba_flag & HBA_FCOE_MODE) { 6719 hs->lip_count = -1; 6720 hs->nos_count = (phba->link_events >> 1); 6721 hs->nos_count -= lso->link_events; 6722 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6723 hs->lip_count = (phba->fc_eventTag >> 1); 6724 hs->lip_count -= lso->link_events; 6725 hs->nos_count = -1; 6726 } else { 6727 hs->lip_count = -1; 6728 hs->nos_count = (phba->fc_eventTag >> 1); 6729 hs->nos_count -= lso->link_events; 6730 } 6731 6732 hs->dumped_frames = -1; 6733 6734 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; 6735 6736 mempool_free(pmboxq, phba->mbox_mem_pool); 6737 6738 return hs; 6739 } 6740 6741 /** 6742 * lpfc_reset_stats - Copy the adapter link stats information 6743 * @shost: kernel scsi host pointer. 
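 *
 * Description:
 * Issues MBX_READ_STATUS with the reset bit set, followed by
 * MBX_READ_LNK_STAT, then snapshots the returned counters into
 * psli->lnk_stat_offsets so that later lpfc_get_stats() calls report
 * values relative to this reset.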
6744 **/ 6745 static void 6746 lpfc_reset_stats(struct Scsi_Host *shost) 6747 { 6748 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6749 struct lpfc_hba *phba = vport->phba; 6750 struct lpfc_sli *psli = &phba->sli; 6751 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; 6752 LPFC_MBOXQ_t *pmboxq; 6753 MAILBOX_t *pmb; 6754 int rc = 0; 6755 6756 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6757 return; 6758 6759 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6760 if (!pmboxq) 6761 return; 6762 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6763 6764 pmb = &pmboxq->u.mb; 6765 pmb->mbxCommand = MBX_READ_STATUS; 6766 pmb->mbxOwner = OWN_HOST; 6767 pmb->un.varWords[0] = 0x1; /* reset request */ 6768 pmboxq->ctx_buf = NULL; 6769 pmboxq->vport = vport; 6770 6771 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6772 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 6773 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6774 if (rc != MBX_SUCCESS) { 6775 mempool_free(pmboxq, phba->mbox_mem_pool); 6776 return; 6777 } 6778 } else { 6779 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6780 if (rc != MBX_SUCCESS) { 6781 if (rc != MBX_TIMEOUT) 6782 mempool_free(pmboxq, phba->mbox_mem_pool); 6783 return; 6784 } 6785 } 6786 6787 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6788 pmb->mbxCommand = MBX_READ_LNK_STAT; 6789 pmb->mbxOwner = OWN_HOST; 6790 pmboxq->ctx_buf = NULL; 6791 pmboxq->vport = vport; 6792 6793 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6794 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 6795 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6796 if (rc != MBX_SUCCESS) { 6797 mempool_free(pmboxq, phba->mbox_mem_pool); 6798 return; 6799 } 6800 } else { 6801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6802 if (rc != MBX_SUCCESS) { 6803 if (rc != MBX_TIMEOUT) 6804 mempool_free(pmboxq, phba->mbox_mem_pool); 6805 return; 6806 } 6807 } 6808 6809 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 6810 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 6811 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 6812 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 6813 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6814 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6815 lso->error_frames = pmb->un.varRdLnk.crcCnt; 6816 if (phba->hba_flag & HBA_FCOE_MODE) 6817 lso->link_events = (phba->link_events >> 1); 6818 else 6819 lso->link_events = (phba->fc_eventTag >> 1); 6820 6821 atomic64_set(&phba->cgn_acqe_stat.warn, 0); 6822 atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 6823 6824 memset(&shost_to_fc_host(shost)->fpin_stats, 0, 6825 sizeof(shost_to_fc_host(shost)->fpin_stats)); 6826 6827 psli->stats_start = ktime_get_seconds(); 6828 6829 mempool_free(pmboxq, phba->mbox_mem_pool); 6830 6831 return; 6832 } 6833 6834 /* 6835 * The LPFC driver treats linkdown handling as target loss events so there 6836 * are no sysfs handlers for link_down_tmo. 6837 */ 6838 6839 /** 6840 * lpfc_get_node_by_target - Return the nodelist for a target 6841 * @starget: kernel scsi target pointer. 
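 *
 * Description:
 * Walks vport->fc_nodes under the host lock looking for a node in the
 * MAPPED state whose SCSI id matches the requested target id.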
6842 * 6843 * Returns: 6844 * address of the node list if found 6845 * NULL target not found 6846 **/ 6847 static struct lpfc_nodelist * 6848 lpfc_get_node_by_target(struct scsi_target *starget) 6849 { 6850 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 6851 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6852 struct lpfc_nodelist *ndlp; 6853 6854 spin_lock_irq(shost->host_lock); 6855 /* Search for this, mapped, target ID */ 6856 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6857 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6858 starget->id == ndlp->nlp_sid) { 6859 spin_unlock_irq(shost->host_lock); 6860 return ndlp; 6861 } 6862 } 6863 spin_unlock_irq(shost->host_lock); 6864 return NULL; 6865 } 6866 6867 /** 6868 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 6869 * @starget: kernel scsi target pointer. 6870 **/ 6871 static void 6872 lpfc_get_starget_port_id(struct scsi_target *starget) 6873 { 6874 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 6875 6876 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; 6877 } 6878 6879 /** 6880 * lpfc_get_starget_node_name - Set the target node name 6881 * @starget: kernel scsi target pointer. 6882 * 6883 * Description: Set the target node name to the ndlp node name wwn or zero. 6884 **/ 6885 static void 6886 lpfc_get_starget_node_name(struct scsi_target *starget) 6887 { 6888 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 6889 6890 fc_starget_node_name(starget) = 6891 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; 6892 } 6893 6894 /** 6895 * lpfc_get_starget_port_name - Set the target port name 6896 * @starget: kernel scsi target pointer. 6897 * 6898 * Description: set the target port name to the ndlp port name wwn or zero. 6899 **/ 6900 static void 6901 lpfc_get_starget_port_name(struct scsi_target *starget) 6902 { 6903 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 6904 6905 fc_starget_port_name(starget) = 6906 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; 6907 } 6908 6909 /** 6910 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo 6911 * @rport: fc rport address. 6912 * @timeout: new value for dev loss tmo. 6913 * 6914 * Description: 6915 * If timeout is non zero set the dev_loss_tmo to timeout, else set 6916 * dev_loss_tmo to one. 6917 **/ 6918 static void 6919 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 6920 { 6921 struct lpfc_rport_data *rdata = rport->dd_data; 6922 struct lpfc_nodelist *ndlp = rdata->pnode; 6923 #if (IS_ENABLED(CONFIG_NVME_FC)) 6924 struct lpfc_nvme_rport *nrport = NULL; 6925 #endif 6926 6927 if (timeout) 6928 rport->dev_loss_tmo = timeout; 6929 else 6930 rport->dev_loss_tmo = 1; 6931 6932 if (!ndlp) { 6933 dev_info(&rport->dev, "Cannot find remote node to " 6934 "set rport dev loss tmo, port_id x%x\n", 6935 rport->port_id); 6936 return; 6937 } 6938 6939 #if (IS_ENABLED(CONFIG_NVME_FC)) 6940 nrport = lpfc_ndlp_get_nrport(ndlp); 6941 6942 if (nrport && nrport->remoteport) 6943 nvme_fc_set_remoteport_devloss(nrport->remoteport, 6944 rport->dev_loss_tmo); 6945 #endif 6946 } 6947 6948 /* 6949 * lpfc_rport_show_function - Return rport target information 6950 * 6951 * Description: 6952 * Macro that uses field to generate a function with the name lpfc_show_rport_ 6953 * 6954 * lpfc_show_rport_##field: returns the bytes formatted in buf 6955 * @cdev: class converted to an fc_rport. 6956 * @buf: on return contains the target_field or zero. 
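 *
 * For illustration only (hypothetical field name), an invocation such as
 * lpfc_rport_rd_attr(example_field, "%d\n", 20) expands to a
 * lpfc_show_rport_example_field() show routine plus a read-only
 * FC_RPORT_ATTR named example_field.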
6957 * 6958 * Returns: size of formatted string. 6959 **/ 6960 #define lpfc_rport_show_function(field, format_string, sz, cast) \ 6961 static ssize_t \ 6962 lpfc_show_rport_##field (struct device *dev, \ 6963 struct device_attribute *attr, \ 6964 char *buf) \ 6965 { \ 6966 struct fc_rport *rport = transport_class_to_rport(dev); \ 6967 struct lpfc_rport_data *rdata = rport->hostdata; \ 6968 return scnprintf(buf, sz, format_string, \ 6969 (rdata->target) ? cast rdata->target->field : 0); \ 6970 } 6971 6972 #define lpfc_rport_rd_attr(field, format_string, sz) \ 6973 lpfc_rport_show_function(field, format_string, sz, ) \ 6974 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) 6975 6976 /** 6977 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name 6978 * @fc_vport: The fc_vport who's symbolic name has been changed. 6979 * 6980 * Description: 6981 * This function is called by the transport after the @fc_vport's symbolic name 6982 * has been changed. This function re-registers the symbolic name with the 6983 * switch to propagate the change into the fabric if the vport is active. 6984 **/ 6985 static void 6986 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) 6987 { 6988 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 6989 6990 if (vport->port_state == LPFC_VPORT_READY) 6991 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 6992 } 6993 6994 /** 6995 * lpfc_hba_log_verbose_init - Set hba's log verbose level 6996 * @phba: Pointer to lpfc_hba struct. 6997 * @verbose: Verbose level to set. 6998 * 6999 * This function is called by the lpfc_get_cfgparam() routine to set the 7000 * module lpfc_log_verbose into the @phba cfg_log_verbose for use with 7001 * log message according to the module's lpfc_log_verbose parameter setting 7002 * before hba port or vport created. 7003 **/ 7004 static void 7005 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) 7006 { 7007 phba->cfg_log_verbose = verbose; 7008 } 7009 7010 struct fc_function_template lpfc_transport_functions = { 7011 /* fixed attributes the driver supports */ 7012 .show_host_node_name = 1, 7013 .show_host_port_name = 1, 7014 .show_host_supported_classes = 1, 7015 .show_host_supported_fc4s = 1, 7016 .show_host_supported_speeds = 1, 7017 .show_host_maxframe_size = 1, 7018 7019 .get_host_symbolic_name = lpfc_get_host_symbolic_name, 7020 .show_host_symbolic_name = 1, 7021 7022 /* dynamic attributes the driver supports */ 7023 .get_host_port_id = lpfc_get_host_port_id, 7024 .show_host_port_id = 1, 7025 7026 .get_host_port_type = lpfc_get_host_port_type, 7027 .show_host_port_type = 1, 7028 7029 .get_host_port_state = lpfc_get_host_port_state, 7030 .show_host_port_state = 1, 7031 7032 /* active_fc4s is shown but doesn't change (thus no get function) */ 7033 .show_host_active_fc4s = 1, 7034 7035 .get_host_speed = lpfc_get_host_speed, 7036 .show_host_speed = 1, 7037 7038 .get_host_fabric_name = lpfc_get_host_fabric_name, 7039 .show_host_fabric_name = 1, 7040 7041 /* 7042 * The LPFC driver treats linkdown handling as target loss events 7043 * so there are no sysfs handlers for link_down_tmo. 
7044 */ 7045 7046 .get_fc_host_stats = lpfc_get_stats, 7047 .reset_fc_host_stats = lpfc_reset_stats, 7048 7049 .dd_fcrport_size = sizeof(struct lpfc_rport_data), 7050 .show_rport_maxframe_size = 1, 7051 .show_rport_supported_classes = 1, 7052 7053 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, 7054 .show_rport_dev_loss_tmo = 1, 7055 7056 .get_starget_port_id = lpfc_get_starget_port_id, 7057 .show_starget_port_id = 1, 7058 7059 .get_starget_node_name = lpfc_get_starget_node_name, 7060 .show_starget_node_name = 1, 7061 7062 .get_starget_port_name = lpfc_get_starget_port_name, 7063 .show_starget_port_name = 1, 7064 7065 .issue_fc_host_lip = lpfc_issue_lip, 7066 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 7067 .terminate_rport_io = lpfc_terminate_rport_io, 7068 7069 .dd_fcvport_size = sizeof(struct lpfc_vport *), 7070 7071 .vport_disable = lpfc_vport_disable, 7072 7073 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, 7074 7075 .bsg_request = lpfc_bsg_request, 7076 .bsg_timeout = lpfc_bsg_timeout, 7077 }; 7078 7079 struct fc_function_template lpfc_vport_transport_functions = { 7080 /* fixed attributes the driver supports */ 7081 .show_host_node_name = 1, 7082 .show_host_port_name = 1, 7083 .show_host_supported_classes = 1, 7084 .show_host_supported_fc4s = 1, 7085 .show_host_supported_speeds = 1, 7086 .show_host_maxframe_size = 1, 7087 7088 .get_host_symbolic_name = lpfc_get_host_symbolic_name, 7089 .show_host_symbolic_name = 1, 7090 7091 /* dynamic attributes the driver supports */ 7092 .get_host_port_id = lpfc_get_host_port_id, 7093 .show_host_port_id = 1, 7094 7095 .get_host_port_type = lpfc_get_host_port_type, 7096 .show_host_port_type = 1, 7097 7098 .get_host_port_state = lpfc_get_host_port_state, 7099 .show_host_port_state = 1, 7100 7101 /* active_fc4s is shown but doesn't change (thus no get function) */ 7102 .show_host_active_fc4s = 1, 7103 7104 .get_host_speed = lpfc_get_host_speed, 7105 .show_host_speed = 1, 7106 7107 .get_host_fabric_name = lpfc_get_host_fabric_name, 7108 .show_host_fabric_name = 1, 7109 7110 /* 7111 * The LPFC driver treats linkdown handling as target loss events 7112 * so there are no sysfs handlers for link_down_tmo. 7113 */ 7114 7115 .get_fc_host_stats = lpfc_get_stats, 7116 .reset_fc_host_stats = lpfc_reset_stats, 7117 7118 .dd_fcrport_size = sizeof(struct lpfc_rport_data), 7119 .show_rport_maxframe_size = 1, 7120 .show_rport_supported_classes = 1, 7121 7122 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, 7123 .show_rport_dev_loss_tmo = 1, 7124 7125 .get_starget_port_id = lpfc_get_starget_port_id, 7126 .show_starget_port_id = 1, 7127 7128 .get_starget_node_name = lpfc_get_starget_node_name, 7129 .show_starget_node_name = 1, 7130 7131 .get_starget_port_name = lpfc_get_starget_port_name, 7132 .show_starget_port_name = 1, 7133 7134 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 7135 .terminate_rport_io = lpfc_terminate_rport_io, 7136 7137 .vport_disable = lpfc_vport_disable, 7138 7139 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, 7140 }; 7141 7142 /** 7143 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE 7144 * Mode 7145 * @phba: lpfc_hba pointer. 
7146 **/ 7147 static void 7148 lpfc_get_hba_function_mode(struct lpfc_hba *phba) 7149 { 7150 /* If the adapter supports FCoE mode */ 7151 switch (phba->pcidev->device) { 7152 case PCI_DEVICE_ID_SKYHAWK: 7153 case PCI_DEVICE_ID_SKYHAWK_VF: 7154 case PCI_DEVICE_ID_LANCER_FCOE: 7155 case PCI_DEVICE_ID_LANCER_FCOE_VF: 7156 case PCI_DEVICE_ID_ZEPHYR_DCSP: 7157 case PCI_DEVICE_ID_TIGERSHARK: 7158 case PCI_DEVICE_ID_TOMCAT: 7159 phba->hba_flag |= HBA_FCOE_MODE; 7160 break; 7161 default: 7162 /* for others, clear the flag */ 7163 phba->hba_flag &= ~HBA_FCOE_MODE; 7164 } 7165 } 7166 7167 /** 7168 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure 7169 * @phba: lpfc_hba pointer. 7170 **/ 7171 void 7172 lpfc_get_cfgparam(struct lpfc_hba *phba) 7173 { 7174 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 7175 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); 7176 lpfc_ns_query_init(phba, lpfc_ns_query); 7177 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); 7178 lpfc_cr_delay_init(phba, lpfc_cr_delay); 7179 lpfc_cr_count_init(phba, lpfc_cr_count); 7180 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); 7181 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); 7182 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); 7183 lpfc_ack0_init(phba, lpfc_ack0); 7184 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing); 7185 lpfc_topology_init(phba, lpfc_topology); 7186 lpfc_link_speed_init(phba, lpfc_link_speed); 7187 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 7188 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); 7189 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 7190 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); 7191 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 7192 lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp); 7193 lpfc_fdmi_on_init(phba, lpfc_fdmi_on); 7194 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); 7195 lpfc_use_msi_init(phba, lpfc_use_msi); 7196 lpfc_nvme_oas_init(phba, lpfc_nvme_oas); 7197 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd); 7198 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 7199 lpfc_force_rscn_init(phba, lpfc_force_rscn); 7200 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold); 7201 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit); 7202 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); 7203 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 7204 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 7205 7206 lpfc_EnableXLane_init(phba, lpfc_EnableXLane); 7207 /* VMID Inits */ 7208 lpfc_max_vmid_init(phba, lpfc_max_vmid); 7209 lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout); 7210 lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header); 7211 lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging); 7212 if (phba->sli_rev != LPFC_SLI_REV4) 7213 phba->cfg_EnableXLane = 0; 7214 lpfc_XLanePriority_init(phba, lpfc_XLanePriority); 7215 7216 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); 7217 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); 7218 phba->cfg_oas_lun_state = 0; 7219 phba->cfg_oas_lun_status = 0; 7220 phba->cfg_oas_flags = 0; 7221 phba->cfg_oas_priority = 0; 7222 lpfc_enable_bg_init(phba, lpfc_enable_bg); 7223 lpfc_prot_mask_init(phba, lpfc_prot_mask); 7224 lpfc_prot_guard_init(phba, lpfc_prot_guard); 7225 if (phba->sli_rev == LPFC_SLI_REV4) 7226 phba->cfg_poll = 0; 7227 else 7228 phba->cfg_poll = lpfc_poll; 7229 7230 /* Get the function mode */ 7231 lpfc_get_hba_function_mode(phba); 7232 7233 /* BlockGuard allowed for 
FC only. */ 7234 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { 7235 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7236 "0581 BlockGuard feature not supported\n"); 7237 /* If set, clear the BlockGuard support param */ 7238 phba->cfg_enable_bg = 0; 7239 } else if (phba->cfg_enable_bg) { 7240 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 7241 } 7242 7243 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); 7244 7245 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); 7246 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); 7247 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); 7248 7249 /* Initialize first burst. Target vs Initiator are different. */ 7250 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 7251 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); 7252 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); 7253 lpfc_hdw_queue_init(phba, lpfc_hdw_queue); 7254 lpfc_irq_chann_init(phba, lpfc_irq_chann); 7255 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); 7256 lpfc_enable_dpp_init(phba, lpfc_enable_dpp); 7257 lpfc_enable_mi_init(phba, lpfc_enable_mi); 7258 7259 phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF; 7260 phba->cmf_active_mode = LPFC_CFG_OFF; 7261 if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX || 7262 lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN) 7263 lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ 7264 7265 if (phba->sli_rev != LPFC_SLI_REV4) { 7266 /* NVME only supported on SLI4 */ 7267 phba->nvmet_support = 0; 7268 phba->cfg_nvmet_mrq = 0; 7269 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 7270 phba->cfg_enable_bbcr = 0; 7271 phba->cfg_xri_rebalancing = 0; 7272 } else { 7273 /* We MUST have FCP support */ 7274 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 7275 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; 7276 } 7277 7278 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1; 7279 7280 phba->cfg_enable_pbde = 0; 7281 7282 /* A value of 0 means use the number of CPUs found in the system */ 7283 if (phba->cfg_hdw_queue == 0) 7284 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; 7285 if (phba->cfg_irq_chann == 0) 7286 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; 7287 if (phba->cfg_irq_chann > phba->cfg_hdw_queue && 7288 phba->sli_rev == LPFC_SLI_REV4) 7289 phba->cfg_irq_chann = phba->cfg_hdw_queue; 7290 7291 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 7292 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 7293 lpfc_aer_support_init(phba, lpfc_aer_support); 7294 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); 7295 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); 7296 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); 7297 lpfc_delay_discovery_init(phba, lpfc_delay_discovery); 7298 lpfc_sli_mode_init(phba, lpfc_sli_mode); 7299 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); 7300 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize); 7301 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level); 7302 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func); 7303 7304 return; 7305 } 7306 7307 /** 7308 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on 7309 * dependencies between protocols and roles. 7310 * @phba: lpfc_hba pointer. 
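 *
 * Worked example (illustrative values, based on the clamping below): on a
 * system with 16 present CPUs, cfg_hdw_queue = 32 and cfg_irq_chann = 64 are
 * both reduced to 16, and cfg_irq_chann is further capped at cfg_hdw_queue.
 * In NVME target mode, a cfg_nvmet_mrq of 0 defaults to cfg_hdw_queue and is
 * limited to LPFC_NVMET_MRQ_MAX.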
7311 **/ 7312 void 7313 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) 7314 { 7315 int logit = 0; 7316 7317 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) { 7318 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; 7319 logit = 1; 7320 } 7321 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) { 7322 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; 7323 logit = 1; 7324 } 7325 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) { 7326 phba->cfg_irq_chann = phba->cfg_hdw_queue; 7327 logit = 1; 7328 } 7329 if (logit) 7330 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7331 "2006 Reducing Queues - CPU limitation: " 7332 "IRQ %d HDWQ %d\n", 7333 phba->cfg_irq_chann, 7334 phba->cfg_hdw_queue); 7335 7336 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 7337 phba->nvmet_support) { 7338 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; 7339 7340 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 7341 "6013 %s x%x fb_size x%x, fb_max x%x\n", 7342 "NVME Target PRLI ACC enable_fb ", 7343 phba->cfg_nvme_enable_fb, 7344 phba->cfg_nvmet_fb_size, 7345 LPFC_NVMET_FB_SZ_MAX); 7346 7347 if (phba->cfg_nvme_enable_fb == 0) 7348 phba->cfg_nvmet_fb_size = 0; 7349 else { 7350 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX) 7351 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX; 7352 } 7353 7354 if (!phba->cfg_nvmet_mrq) 7355 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 7356 7357 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ 7358 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) { 7359 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 7360 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 7361 "6018 Adjust lpfc_nvmet_mrq to %d\n", 7362 phba->cfg_nvmet_mrq); 7363 } 7364 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 7365 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 7366 7367 } else { 7368 /* Not NVME Target mode. Turn off Target parameters. */ 7369 phba->nvmet_support = 0; 7370 phba->cfg_nvmet_mrq = 0; 7371 phba->cfg_nvmet_fb_size = 0; 7372 } 7373 } 7374 7375 /** 7376 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure 7377 * @vport: lpfc_vport pointer. 7378 **/ 7379 void 7380 lpfc_get_vport_cfgparam(struct lpfc_vport *vport) 7381 { 7382 lpfc_log_verbose_init(vport, lpfc_log_verbose); 7383 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); 7384 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth); 7385 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); 7386 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); 7387 lpfc_peer_port_login_init(vport, lpfc_peer_port_login); 7388 lpfc_restrict_login_init(vport, lpfc_restrict_login); 7389 lpfc_fcp_class_init(vport, lpfc_fcp_class); 7390 lpfc_use_adisc_init(vport, lpfc_use_adisc); 7391 lpfc_first_burst_size_init(vport, lpfc_first_burst_size); 7392 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); 7393 lpfc_discovery_threads_init(vport, lpfc_discovery_threads); 7394 lpfc_max_luns_init(vport, lpfc_max_luns); 7395 lpfc_scan_down_init(vport, lpfc_scan_down); 7396 lpfc_enable_da_id_init(vport, lpfc_enable_da_id); 7397 return; 7398 } 7399