/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_attr.h"

#define LPFC_DEF_DEVLOSS_TMO	30
#define LPFC_MIN_DEVLOSS_TMO	1
#define LPFC_MAX_DEVLOSS_TMO	255

#define LPFC_MAX_INFO_TMP_LEN	100
#define LPFC_INFO_MORE_STR	"\nCould be more info...\n"

/*
 * Write key size should be multiple of 4. If write key is changed
 * make sure that library write key is also changed.
 */
#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

const char *const trunk_errmsg[] = {	/* map errcode */
	"",	/* There is no such error code at index 0 */
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};

/**
 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
 * @incr: integer to convert.
 * @hdw: ascii string holding converted integer plus a string terminator.
 *
 * Description:
 * JEDEC Joint Electron Device Engineering Council.
 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
 * character string. The string is then terminated with a NULL in byte 9.
 * Hex 0-9 becomes ascii '0' to '9'.
 * Hex a-f becomes ascii 'a' to 'f'.
 *
 * Notes:
 * Coded for 32 bit integers only.
 **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 + j;
		else
			hdw[7 - i] = 0x61 + j - 10;
		incr = (incr >> 4);
	}
	hdw[8] = 0;
	return;
}
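
/*
 * Illustrative sketch only (not compiled into the driver): the loop below
 * mirrors lpfc_jedec_to_ascii() in plain userspace C so the nibble-to-ascii
 * mapping is easy to check in isolation. For incr = 0x12abcdef it prints
 * "12abcdef", most significant nibble first.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int incr = 0x12abcdef;
 *		char hdw[9];
 *		int i, j;
 *
 *		for (i = 0; i < 8; i++) {
 *			j = incr & 0xf;
 *			hdw[7 - i] = (j <= 9) ? '0' + j : 'a' + j - 10;
 *			incr >>= 4;
 *		}
 *		hdw[8] = 0;
 *		printf("%s\n", hdw);	// prints 12abcdef
 *		return 0;
 *	}
 */
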
static ssize_t
lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_cgn_info *cp = NULL;
	struct lpfc_cgn_stat *cgs;
	int len = 0;
	int cpu;
	u64 rcv, total;
	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};

	if (phba->cgn_i)
		cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	scnprintf(tmp, sizeof(tmp),
		  "Congestion Mgmt Info: E2Eattr %d Ver %d "
		  "CMF %d cnt %d\n",
		  phba->sli4_hba.pc_sli4_params.mi_ver,
		  cp ? cp->cgn_info_version : 0,
		  phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);

	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	if (!phba->sli4_hba.pc_sli4_params.cmf)
		goto buffer_done;

	switch (phba->cgn_init_reg_signal) {
	case EDC_CG_SIG_WARN_ONLY:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:WARN ");
		break;
	case EDC_CG_SIG_WARN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:WARN|ALARM ");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "Register: Init: Signal:NONE ");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_init_reg_fpin) {
	case LPFC_CGN_FPIN_WARN:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN\n");
		break;
	case LPFC_CGN_FPIN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:ALARM\n");
		break;
	case LPFC_CGN_FPIN_BOTH:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN|ALARM\n");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:NONE\n");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_reg_signal) {
	case EDC_CG_SIG_WARN_ONLY:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:WARN ");
		break;
	case EDC_CG_SIG_WARN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:WARN|ALARM ");
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  " Current: Signal:NONE ");
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	switch (phba->cgn_reg_fpin) {
	case LPFC_CGN_FPIN_WARN:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	case LPFC_CGN_FPIN_ALARM:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	case LPFC_CGN_FPIN_BOTH:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	default:
		scnprintf(tmp, sizeof(tmp),
			  "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
		break;
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
		switch (phba->cmf_active_mode) {
		case LPFC_CFG_OFF:
			scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
			break;
		case LPFC_CFG_MANAGED:
			scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
			break;
		case LPFC_CFG_MONITOR:
			scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
			break;
		default:
			scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
		}
		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
			goto buffer_done;
	}

	switch (phba->cgn_p.cgn_param_mode) {
	case LPFC_CFG_OFF:
		scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
		break;
	case LPFC_CFG_MANAGED:
		scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
		break;
	case LPFC_CFG_MONITOR:
		scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
		break;
	default:
		scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
	}
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	total = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		total += atomic64_read(&cgs->total_bytes);
		rcv += atomic64_read(&cgs->rcv_bytes);
	}

	scnprintf(tmp, sizeof(tmp),
		  "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
		  atomic_read(&phba->cmf_busy),
		  phba->cmf_active_info, rcv, total);
	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
		goto buffer_done;

	scnprintf(tmp, sizeof(tmp),
		  "Port_speed:%d Link_byte_cnt:%ld "
		  "Max_byte_per_interval:%ld\n",
		  lpfc_sli_port_speed_get(phba),
		  (unsigned long)phba->cmf_link_byte_count,
		  (unsigned long)phba->cmf_max_bytes_per_interval);
	strlcat(buf, tmp, PAGE_SIZE);

buffer_done:
	len = strnlen(buf, PAGE_SIZE);

	if (unlikely(len >= (PAGE_SIZE - 1))) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6312 Catching potential buffer "
				"overflow > PAGE_SIZE = %lu bytes\n",
				PAGE_SIZE);
		strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
			LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
	}
	return len;
}
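
/*
 * The multi-line show() handlers in this file all build their output with
 * the same bounded-append pattern: format one field into a small stack
 * buffer, strlcat() it into the sysfs page, and bail out as soon as the
 * page would overflow. These handlers rely on the sysfs buffer arriving
 * zero-filled, so strlcat() starts from an empty string. A minimal sketch
 * of that pattern (example_show and the field values are hypothetical; the
 * helpers and limits are the real ones used above):
 *
 *	static ssize_t example_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
 *
 *		scnprintf(tmp, sizeof(tmp), "first field: %d\n", 1);
 *		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
 *			goto buffer_done;
 *
 *		scnprintf(tmp, sizeof(tmp), "second field: %d\n", 2);
 *		strlcat(buf, tmp, PAGE_SIZE);
 *
 *	buffer_done:
 *		return strnlen(buf, PAGE_SIZE);
 *	}
 */
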
/**
 * lpfc_drvr_version_show - Return the Emulex driver string with version number
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}

/**
 * lpfc_enable_fip_show - Return the fip mode of the HBA
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
316 **/ 317 static ssize_t 318 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, 319 char *buf) 320 { 321 struct Scsi_Host *shost = class_to_shost(dev); 322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 323 struct lpfc_hba *phba = vport->phba; 324 325 if (phba->hba_flag & HBA_FIP_SUPPORT) 326 return scnprintf(buf, PAGE_SIZE, "1\n"); 327 else 328 return scnprintf(buf, PAGE_SIZE, "0\n"); 329 } 330 331 static ssize_t 332 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, 333 char *buf) 334 { 335 struct Scsi_Host *shost = class_to_shost(dev); 336 struct lpfc_vport *vport = shost_priv(shost); 337 struct lpfc_hba *phba = vport->phba; 338 struct lpfc_nvmet_tgtport *tgtp; 339 struct nvme_fc_local_port *localport; 340 struct lpfc_nvme_lport *lport; 341 struct lpfc_nvme_rport *rport; 342 struct lpfc_nodelist *ndlp; 343 struct nvme_fc_remote_port *nrport; 344 struct lpfc_fc4_ctrl_stat *cstat; 345 uint64_t data1, data2, data3; 346 uint64_t totin, totout, tot; 347 char *statep; 348 int i; 349 int len = 0; 350 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; 351 352 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 353 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); 354 return len; 355 } 356 if (phba->nvmet_support) { 357 if (!phba->targetport) { 358 len = scnprintf(buf, PAGE_SIZE, 359 "NVME Target: x%llx is not allocated\n", 360 wwn_to_u64(vport->fc_portname.u.wwn)); 361 return len; 362 } 363 /* Port state is only one of two values for now. */ 364 if (phba->targetport->port_id) 365 statep = "REGISTERED"; 366 else 367 statep = "INIT"; 368 scnprintf(tmp, sizeof(tmp), 369 "NVME Target Enabled State %s\n", 370 statep); 371 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 372 goto buffer_done; 373 374 scnprintf(tmp, sizeof(tmp), 375 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", 376 "NVME Target: lpfc", 377 phba->brd_no, 378 wwn_to_u64(vport->fc_portname.u.wwn), 379 wwn_to_u64(vport->fc_nodename.u.wwn), 380 phba->targetport->port_id); 381 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 382 goto buffer_done; 383 384 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) 385 >= PAGE_SIZE) 386 goto buffer_done; 387 388 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 389 scnprintf(tmp, sizeof(tmp), 390 "LS: Rcv %08x Drop %08x Abort %08x\n", 391 atomic_read(&tgtp->rcv_ls_req_in), 392 atomic_read(&tgtp->rcv_ls_req_drop), 393 atomic_read(&tgtp->xmt_ls_abort)); 394 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 395 goto buffer_done; 396 397 if (atomic_read(&tgtp->rcv_ls_req_in) != 398 atomic_read(&tgtp->rcv_ls_req_out)) { 399 scnprintf(tmp, sizeof(tmp), 400 "Rcv LS: in %08x != out %08x\n", 401 atomic_read(&tgtp->rcv_ls_req_in), 402 atomic_read(&tgtp->rcv_ls_req_out)); 403 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 404 goto buffer_done; 405 } 406 407 scnprintf(tmp, sizeof(tmp), 408 "LS: Xmt %08x Drop %08x Cmpl %08x\n", 409 atomic_read(&tgtp->xmt_ls_rsp), 410 atomic_read(&tgtp->xmt_ls_drop), 411 atomic_read(&tgtp->xmt_ls_rsp_cmpl)); 412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 413 goto buffer_done; 414 415 scnprintf(tmp, sizeof(tmp), 416 "LS: RSP Abort %08x xb %08x Err %08x\n", 417 atomic_read(&tgtp->xmt_ls_rsp_aborted), 418 atomic_read(&tgtp->xmt_ls_rsp_xb_set), 419 atomic_read(&tgtp->xmt_ls_rsp_error)); 420 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 421 goto buffer_done; 422 423 scnprintf(tmp, sizeof(tmp), 424 "FCP: Rcv %08x Defer %08x Release %08x " 425 "Drop %08x\n", 426 atomic_read(&tgtp->rcv_fcp_cmd_in), 427 
atomic_read(&tgtp->rcv_fcp_cmd_defer), 428 atomic_read(&tgtp->xmt_fcp_release), 429 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 430 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 431 goto buffer_done; 432 433 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 434 atomic_read(&tgtp->rcv_fcp_cmd_out)) { 435 scnprintf(tmp, sizeof(tmp), 436 "Rcv FCP: in %08x != out %08x\n", 437 atomic_read(&tgtp->rcv_fcp_cmd_in), 438 atomic_read(&tgtp->rcv_fcp_cmd_out)); 439 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 440 goto buffer_done; 441 } 442 443 scnprintf(tmp, sizeof(tmp), 444 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " 445 "drop %08x\n", 446 atomic_read(&tgtp->xmt_fcp_read), 447 atomic_read(&tgtp->xmt_fcp_read_rsp), 448 atomic_read(&tgtp->xmt_fcp_write), 449 atomic_read(&tgtp->xmt_fcp_rsp), 450 atomic_read(&tgtp->xmt_fcp_drop)); 451 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 452 goto buffer_done; 453 454 scnprintf(tmp, sizeof(tmp), 455 "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 456 atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 457 atomic_read(&tgtp->xmt_fcp_rsp_error), 458 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 459 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 460 goto buffer_done; 461 462 scnprintf(tmp, sizeof(tmp), 463 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", 464 atomic_read(&tgtp->xmt_fcp_rsp_aborted), 465 atomic_read(&tgtp->xmt_fcp_rsp_xb_set), 466 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); 467 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 468 goto buffer_done; 469 470 scnprintf(tmp, sizeof(tmp), 471 "ABORT: Xmt %08x Cmpl %08x\n", 472 atomic_read(&tgtp->xmt_fcp_abort), 473 atomic_read(&tgtp->xmt_fcp_abort_cmpl)); 474 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 475 goto buffer_done; 476 477 scnprintf(tmp, sizeof(tmp), 478 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", 479 atomic_read(&tgtp->xmt_abort_sol), 480 atomic_read(&tgtp->xmt_abort_unsol), 481 atomic_read(&tgtp->xmt_abort_rsp), 482 atomic_read(&tgtp->xmt_abort_rsp_error)); 483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 484 goto buffer_done; 485 486 scnprintf(tmp, sizeof(tmp), 487 "DELAY: ctx %08x fod %08x wqfull %08x\n", 488 atomic_read(&tgtp->defer_ctx), 489 atomic_read(&tgtp->defer_fod), 490 atomic_read(&tgtp->defer_wqfull)); 491 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 492 goto buffer_done; 493 494 /* Calculate outstanding IOs */ 495 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); 496 tot += atomic_read(&tgtp->xmt_fcp_release); 497 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 498 499 scnprintf(tmp, sizeof(tmp), 500 "IO_CTX: %08x WAIT: cur %08x tot %08x\n" 501 "CTX Outstanding %08llx\n\n", 502 phba->sli4_hba.nvmet_xri_cnt, 503 phba->sli4_hba.nvmet_io_wait_cnt, 504 phba->sli4_hba.nvmet_io_wait_total, 505 tot); 506 strlcat(buf, tmp, PAGE_SIZE); 507 goto buffer_done; 508 } 509 510 localport = vport->localport; 511 if (!localport) { 512 len = scnprintf(buf, PAGE_SIZE, 513 "NVME Initiator x%llx is not allocated\n", 514 wwn_to_u64(vport->fc_portname.u.wwn)); 515 return len; 516 } 517 lport = (struct lpfc_nvme_lport *)localport->private; 518 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) 519 goto buffer_done; 520 521 scnprintf(tmp, sizeof(tmp), 522 "XRI Dist lpfc%d Total %d IO %d ELS %d\n", 523 phba->brd_no, 524 phba->sli4_hba.max_cfg_param.max_xri, 525 phba->sli4_hba.io_xri_max, 526 lpfc_sli4_get_els_iocb_cnt(phba)); 527 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 528 goto buffer_done; 529 530 /* Port state is only one of two values for now. 
*/ 531 if (localport->port_id) 532 statep = "ONLINE"; 533 else 534 statep = "UNKNOWN "; 535 536 scnprintf(tmp, sizeof(tmp), 537 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", 538 "NVME LPORT lpfc", 539 phba->brd_no, 540 wwn_to_u64(vport->fc_portname.u.wwn), 541 wwn_to_u64(vport->fc_nodename.u.wwn), 542 localport->port_id, statep); 543 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 544 goto buffer_done; 545 546 spin_lock_irq(shost->host_lock); 547 548 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 549 nrport = NULL; 550 spin_lock(&ndlp->lock); 551 rport = lpfc_ndlp_get_nrport(ndlp); 552 if (rport) 553 nrport = rport->remoteport; 554 spin_unlock(&ndlp->lock); 555 if (!nrport) 556 continue; 557 558 /* Port state is only one of two values for now. */ 559 switch (nrport->port_state) { 560 case FC_OBJSTATE_ONLINE: 561 statep = "ONLINE"; 562 break; 563 case FC_OBJSTATE_UNKNOWN: 564 statep = "UNKNOWN "; 565 break; 566 default: 567 statep = "UNSUPPORTED"; 568 break; 569 } 570 571 /* Tab in to show lport ownership. */ 572 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) 573 goto unlock_buf_done; 574 if (phba->brd_no >= 10) { 575 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) 576 goto unlock_buf_done; 577 } 578 579 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", 580 nrport->port_name); 581 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 582 goto unlock_buf_done; 583 584 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", 585 nrport->node_name); 586 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 587 goto unlock_buf_done; 588 589 scnprintf(tmp, sizeof(tmp), "DID x%06x ", 590 nrport->port_id); 591 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 592 goto unlock_buf_done; 593 594 /* An NVME rport can have multiple roles. */ 595 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { 596 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) 597 goto unlock_buf_done; 598 } 599 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { 600 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) 601 goto unlock_buf_done; 602 } 603 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { 604 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) 605 goto unlock_buf_done; 606 } 607 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | 608 FC_PORT_ROLE_NVME_TARGET | 609 FC_PORT_ROLE_NVME_DISCOVERY)) { 610 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", 611 nrport->port_role); 612 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 613 goto unlock_buf_done; 614 } 615 616 scnprintf(tmp, sizeof(tmp), "%s\n", statep); 617 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 618 goto unlock_buf_done; 619 } 620 spin_unlock_irq(shost->host_lock); 621 622 if (!lport) 623 goto buffer_done; 624 625 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) 626 goto buffer_done; 627 628 scnprintf(tmp, sizeof(tmp), 629 "LS: Xmt %010x Cmpl %010x Abort %08x\n", 630 atomic_read(&lport->fc4NvmeLsRequests), 631 atomic_read(&lport->fc4NvmeLsCmpls), 632 atomic_read(&lport->xmt_ls_abort)); 633 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 634 goto buffer_done; 635 636 scnprintf(tmp, sizeof(tmp), 637 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", 638 atomic_read(&lport->xmt_ls_err), 639 atomic_read(&lport->cmpl_ls_xb), 640 atomic_read(&lport->cmpl_ls_err)); 641 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 642 goto buffer_done; 643 644 totin = 0; 645 totout = 0; 646 for (i = 0; i < phba->cfg_hdw_queue; i++) { 647 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; 648 tot = cstat->io_cmpls; 649 totin += tot; 650 data1 = cstat->input_requests; 651 data2 
= cstat->output_requests; 652 data3 = cstat->control_requests; 653 totout += (data1 + data2 + data3); 654 } 655 scnprintf(tmp, sizeof(tmp), 656 "Total FCP Cmpl %016llx Issue %016llx " 657 "OutIO %016llx\n", 658 totin, totout, totout - totin); 659 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 660 goto buffer_done; 661 662 scnprintf(tmp, sizeof(tmp), 663 "\tabort %08x noxri %08x nondlp %08x qdepth %08x " 664 "wqerr %08x err %08x\n", 665 atomic_read(&lport->xmt_fcp_abort), 666 atomic_read(&lport->xmt_fcp_noxri), 667 atomic_read(&lport->xmt_fcp_bad_ndlp), 668 atomic_read(&lport->xmt_fcp_qdepth), 669 atomic_read(&lport->xmt_fcp_wqerr), 670 atomic_read(&lport->xmt_fcp_err)); 671 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 672 goto buffer_done; 673 674 scnprintf(tmp, sizeof(tmp), 675 "FCP CMPL: xb %08x Err %08x\n", 676 atomic_read(&lport->cmpl_fcp_xb), 677 atomic_read(&lport->cmpl_fcp_err)); 678 strlcat(buf, tmp, PAGE_SIZE); 679 680 /* host_lock is already unlocked. */ 681 goto buffer_done; 682 683 unlock_buf_done: 684 spin_unlock_irq(shost->host_lock); 685 686 buffer_done: 687 len = strnlen(buf, PAGE_SIZE); 688 689 if (unlikely(len >= (PAGE_SIZE - 1))) { 690 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 691 "6314 Catching potential buffer " 692 "overflow > PAGE_SIZE = %lu bytes\n", 693 PAGE_SIZE); 694 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), 695 LPFC_INFO_MORE_STR, 696 sizeof(LPFC_INFO_MORE_STR) + 1); 697 } 698 699 return len; 700 } 701 702 static ssize_t 703 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, 704 char *buf) 705 { 706 struct Scsi_Host *shost = class_to_shost(dev); 707 struct lpfc_vport *vport = shost_priv(shost); 708 struct lpfc_hba *phba = vport->phba; 709 int len; 710 struct lpfc_fc4_ctrl_stat *cstat; 711 u64 data1, data2, data3; 712 u64 tot, totin, totout; 713 int i; 714 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; 715 716 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || 717 (phba->sli_rev != LPFC_SLI_REV4)) 718 return 0; 719 720 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); 721 722 totin = 0; 723 totout = 0; 724 for (i = 0; i < phba->cfg_hdw_queue; i++) { 725 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; 726 tot = cstat->io_cmpls; 727 totin += tot; 728 data1 = cstat->input_requests; 729 data2 = cstat->output_requests; 730 data3 = cstat->control_requests; 731 totout += (data1 + data2 + data3); 732 733 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " 734 "IO %016llx ", i, data1, data2, data3); 735 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 736 goto buffer_done; 737 738 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", 739 tot, ((data1 + data2 + data3) - tot)); 740 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 741 goto buffer_done; 742 } 743 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " 744 "OutIO %016llx\n", totin, totout, totout - totin); 745 strlcat(buf, tmp, PAGE_SIZE); 746 747 buffer_done: 748 len = strnlen(buf, PAGE_SIZE); 749 750 return len; 751 } 752 753 static ssize_t 754 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, 755 char *buf) 756 { 757 struct Scsi_Host *shost = class_to_shost(dev); 758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 759 struct lpfc_hba *phba = vport->phba; 760 761 if (phba->cfg_enable_bg) { 762 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 763 return scnprintf(buf, PAGE_SIZE, 764 "BlockGuard Enabled\n"); 765 else 766 return scnprintf(buf, PAGE_SIZE, 767 "BlockGuard Not Supported\n"); 768 } else 769 return 
scnprintf(buf, PAGE_SIZE, 770 "BlockGuard Disabled\n"); 771 } 772 773 static ssize_t 774 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, 775 char *buf) 776 { 777 struct Scsi_Host *shost = class_to_shost(dev); 778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 779 struct lpfc_hba *phba = vport->phba; 780 781 return scnprintf(buf, PAGE_SIZE, "%llu\n", 782 (unsigned long long)phba->bg_guard_err_cnt); 783 } 784 785 static ssize_t 786 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, 787 char *buf) 788 { 789 struct Scsi_Host *shost = class_to_shost(dev); 790 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 791 struct lpfc_hba *phba = vport->phba; 792 793 return scnprintf(buf, PAGE_SIZE, "%llu\n", 794 (unsigned long long)phba->bg_apptag_err_cnt); 795 } 796 797 static ssize_t 798 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, 799 char *buf) 800 { 801 struct Scsi_Host *shost = class_to_shost(dev); 802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 803 struct lpfc_hba *phba = vport->phba; 804 805 return scnprintf(buf, PAGE_SIZE, "%llu\n", 806 (unsigned long long)phba->bg_reftag_err_cnt); 807 } 808 809 /** 810 * lpfc_info_show - Return some pci info about the host in ascii 811 * @dev: class converted to a Scsi_host structure. 812 * @attr: device attribute, not used. 813 * @buf: on return contains the formatted text from lpfc_info(). 814 * 815 * Returns: size of formatted string. 816 **/ 817 static ssize_t 818 lpfc_info_show(struct device *dev, struct device_attribute *attr, 819 char *buf) 820 { 821 struct Scsi_Host *host = class_to_shost(dev); 822 823 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); 824 } 825 826 /** 827 * lpfc_serialnum_show - Return the hba serial number in ascii 828 * @dev: class converted to a Scsi_host structure. 829 * @attr: device attribute, not used. 830 * @buf: on return contains the formatted text serial number. 831 * 832 * Returns: size of formatted string. 833 **/ 834 static ssize_t 835 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, 836 char *buf) 837 { 838 struct Scsi_Host *shost = class_to_shost(dev); 839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 840 struct lpfc_hba *phba = vport->phba; 841 842 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); 843 } 844 845 /** 846 * lpfc_temp_sensor_show - Return the temperature sensor level 847 * @dev: class converted to a Scsi_host structure. 848 * @attr: device attribute, not used. 849 * @buf: on return contains the formatted support level. 850 * 851 * Description: 852 * Returns a number indicating the temperature sensor level currently 853 * supported, zero or one in ascii. 854 * 855 * Returns: size of formatted string. 856 **/ 857 static ssize_t 858 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, 859 char *buf) 860 { 861 struct Scsi_Host *shost = class_to_shost(dev); 862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 863 struct lpfc_hba *phba = vport->phba; 864 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); 865 } 866 867 /** 868 * lpfc_modeldesc_show - Return the model description of the hba 869 * @dev: class converted to a Scsi_host structure. 870 * @attr: device attribute, not used. 871 * @buf: on return contains the scsi vpd model description. 872 * 873 * Returns: size of formatted string. 
874 **/ 875 static ssize_t 876 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, 877 char *buf) 878 { 879 struct Scsi_Host *shost = class_to_shost(dev); 880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 881 struct lpfc_hba *phba = vport->phba; 882 883 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); 884 } 885 886 /** 887 * lpfc_modelname_show - Return the model name of the hba 888 * @dev: class converted to a Scsi_host structure. 889 * @attr: device attribute, not used. 890 * @buf: on return contains the scsi vpd model name. 891 * 892 * Returns: size of formatted string. 893 **/ 894 static ssize_t 895 lpfc_modelname_show(struct device *dev, struct device_attribute *attr, 896 char *buf) 897 { 898 struct Scsi_Host *shost = class_to_shost(dev); 899 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 900 struct lpfc_hba *phba = vport->phba; 901 902 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); 903 } 904 905 /** 906 * lpfc_programtype_show - Return the program type of the hba 907 * @dev: class converted to a Scsi_host structure. 908 * @attr: device attribute, not used. 909 * @buf: on return contains the scsi vpd program type. 910 * 911 * Returns: size of formatted string. 912 **/ 913 static ssize_t 914 lpfc_programtype_show(struct device *dev, struct device_attribute *attr, 915 char *buf) 916 { 917 struct Scsi_Host *shost = class_to_shost(dev); 918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 919 struct lpfc_hba *phba = vport->phba; 920 921 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); 922 } 923 924 /** 925 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag 926 * @dev: class converted to a Scsi_host structure. 927 * @attr: device attribute, not used. 928 * @buf: on return contains the Menlo Maintenance sli flag. 929 * 930 * Returns: size of formatted string. 931 **/ 932 static ssize_t 933 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) 934 { 935 struct Scsi_Host *shost = class_to_shost(dev); 936 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 937 struct lpfc_hba *phba = vport->phba; 938 939 return scnprintf(buf, PAGE_SIZE, "%d\n", 940 (phba->sli.sli_flag & LPFC_MENLO_MAINT)); 941 } 942 943 /** 944 * lpfc_vportnum_show - Return the port number in ascii of the hba 945 * @dev: class converted to a Scsi_host structure. 946 * @attr: device attribute, not used. 947 * @buf: on return contains scsi vpd program type. 948 * 949 * Returns: size of formatted string. 950 **/ 951 static ssize_t 952 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, 953 char *buf) 954 { 955 struct Scsi_Host *shost = class_to_shost(dev); 956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 957 struct lpfc_hba *phba = vport->phba; 958 959 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); 960 } 961 962 /** 963 * lpfc_fwrev_show - Return the firmware rev running in the hba 964 * @dev: class converted to a Scsi_host structure. 965 * @attr: device attribute, not used. 966 * @buf: on return contains the scsi vpd program type. 967 * 968 * Returns: size of formatted string. 
969 **/ 970 static ssize_t 971 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, 972 char *buf) 973 { 974 struct Scsi_Host *shost = class_to_shost(dev); 975 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 976 struct lpfc_hba *phba = vport->phba; 977 uint32_t if_type; 978 uint8_t sli_family; 979 char fwrev[FW_REV_STR_SIZE]; 980 int len; 981 982 lpfc_decode_firmware_rev(phba, fwrev, 1); 983 if_type = phba->sli4_hba.pc_sli4_params.if_type; 984 sli_family = phba->sli4_hba.pc_sli4_params.sli_family; 985 986 if (phba->sli_rev < LPFC_SLI_REV4) 987 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", 988 fwrev, phba->sli_rev); 989 else 990 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", 991 fwrev, phba->sli_rev, if_type, sli_family); 992 993 return len; 994 } 995 996 /** 997 * lpfc_hdw_show - Return the jedec information about the hba 998 * @dev: class converted to a Scsi_host structure. 999 * @attr: device attribute, not used. 1000 * @buf: on return contains the scsi vpd program type. 1001 * 1002 * Returns: size of formatted string. 1003 **/ 1004 static ssize_t 1005 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) 1006 { 1007 char hdw[9]; 1008 struct Scsi_Host *shost = class_to_shost(dev); 1009 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1010 struct lpfc_hba *phba = vport->phba; 1011 lpfc_vpd_t *vp = &phba->vpd; 1012 1013 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 1014 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, 1015 vp->rev.smRev, vp->rev.smFwRev); 1016 } 1017 1018 /** 1019 * lpfc_option_rom_version_show - Return the adapter ROM FCode version 1020 * @dev: class converted to a Scsi_host structure. 1021 * @attr: device attribute, not used. 1022 * @buf: on return contains the ROM and FCode ascii strings. 1023 * 1024 * Returns: size of formatted string. 1025 **/ 1026 static ssize_t 1027 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, 1028 char *buf) 1029 { 1030 struct Scsi_Host *shost = class_to_shost(dev); 1031 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1032 struct lpfc_hba *phba = vport->phba; 1033 char fwrev[FW_REV_STR_SIZE]; 1034 1035 if (phba->sli_rev < LPFC_SLI_REV4) 1036 return scnprintf(buf, PAGE_SIZE, "%s\n", 1037 phba->OptionROMVersion); 1038 1039 lpfc_decode_firmware_rev(phba, fwrev, 1); 1040 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); 1041 } 1042 1043 /** 1044 * lpfc_link_state_show - Return the link state of the port 1045 * @dev: class converted to a Scsi_host structure. 1046 * @attr: device attribute, not used. 1047 * @buf: on return contains text describing the state of the link. 1048 * 1049 * Notes: 1050 * The switch statement has no default so zero will be returned. 1051 * 1052 * Returns: size of formatted string. 
1053 **/ 1054 static ssize_t 1055 lpfc_link_state_show(struct device *dev, struct device_attribute *attr, 1056 char *buf) 1057 { 1058 struct Scsi_Host *shost = class_to_shost(dev); 1059 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1060 struct lpfc_hba *phba = vport->phba; 1061 int len = 0; 1062 1063 switch (phba->link_state) { 1064 case LPFC_LINK_UNKNOWN: 1065 case LPFC_WARM_START: 1066 case LPFC_INIT_START: 1067 case LPFC_INIT_MBX_CMDS: 1068 case LPFC_LINK_DOWN: 1069 case LPFC_HBA_ERROR: 1070 if (phba->hba_flag & LINK_DISABLED) 1071 len += scnprintf(buf + len, PAGE_SIZE-len, 1072 "Link Down - User disabled\n"); 1073 else 1074 len += scnprintf(buf + len, PAGE_SIZE-len, 1075 "Link Down\n"); 1076 break; 1077 case LPFC_LINK_UP: 1078 case LPFC_CLEAR_LA: 1079 case LPFC_HBA_READY: 1080 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); 1081 1082 switch (vport->port_state) { 1083 case LPFC_LOCAL_CFG_LINK: 1084 len += scnprintf(buf + len, PAGE_SIZE-len, 1085 "Configuring Link\n"); 1086 break; 1087 case LPFC_FDISC: 1088 case LPFC_FLOGI: 1089 case LPFC_FABRIC_CFG_LINK: 1090 case LPFC_NS_REG: 1091 case LPFC_NS_QRY: 1092 case LPFC_BUILD_DISC_LIST: 1093 case LPFC_DISC_AUTH: 1094 len += scnprintf(buf + len, PAGE_SIZE - len, 1095 "Discovery\n"); 1096 break; 1097 case LPFC_VPORT_READY: 1098 len += scnprintf(buf + len, PAGE_SIZE - len, 1099 "Ready\n"); 1100 break; 1101 1102 case LPFC_VPORT_FAILED: 1103 len += scnprintf(buf + len, PAGE_SIZE - len, 1104 "Failed\n"); 1105 break; 1106 1107 case LPFC_VPORT_UNKNOWN: 1108 len += scnprintf(buf + len, PAGE_SIZE - len, 1109 "Unknown\n"); 1110 break; 1111 } 1112 if (phba->sli.sli_flag & LPFC_MENLO_MAINT) 1113 len += scnprintf(buf + len, PAGE_SIZE-len, 1114 " Menlo Maint Mode\n"); 1115 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 1116 if (vport->fc_flag & FC_PUBLIC_LOOP) 1117 len += scnprintf(buf + len, PAGE_SIZE-len, 1118 " Public Loop\n"); 1119 else 1120 len += scnprintf(buf + len, PAGE_SIZE-len, 1121 " Private Loop\n"); 1122 } else { 1123 if (vport->fc_flag & FC_FABRIC) 1124 len += scnprintf(buf + len, PAGE_SIZE-len, 1125 " Fabric\n"); 1126 else 1127 len += scnprintf(buf + len, PAGE_SIZE-len, 1128 " Point-2-Point\n"); 1129 } 1130 } 1131 1132 if ((phba->sli_rev == LPFC_SLI_REV4) && 1133 ((bf_get(lpfc_sli_intf_if_type, 1134 &phba->sli4_hba.sli_intf) == 1135 LPFC_SLI_INTF_IF_TYPE_6))) { 1136 struct lpfc_trunk_link link = phba->trunk_link; 1137 1138 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 1139 len += scnprintf(buf + len, PAGE_SIZE - len, 1140 "Trunk port 0: Link %s %s\n", 1141 (link.link0.state == LPFC_LINK_UP) ? 1142 "Up" : "Down. ", 1143 trunk_errmsg[link.link0.fault]); 1144 1145 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 1146 len += scnprintf(buf + len, PAGE_SIZE - len, 1147 "Trunk port 1: Link %s %s\n", 1148 (link.link1.state == LPFC_LINK_UP) ? 1149 "Up" : "Down. ", 1150 trunk_errmsg[link.link1.fault]); 1151 1152 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 1153 len += scnprintf(buf + len, PAGE_SIZE - len, 1154 "Trunk port 2: Link %s %s\n", 1155 (link.link2.state == LPFC_LINK_UP) ? 1156 "Up" : "Down. ", 1157 trunk_errmsg[link.link2.fault]); 1158 1159 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 1160 len += scnprintf(buf + len, PAGE_SIZE - len, 1161 "Trunk port 3: Link %s %s\n", 1162 (link.link3.state == LPFC_LINK_UP) ? 1163 "Up" : "Down. 
", 1164 trunk_errmsg[link.link3.fault]); 1165 1166 } 1167 1168 return len; 1169 } 1170 1171 /** 1172 * lpfc_sli4_protocol_show - Return the fip mode of the HBA 1173 * @dev: class unused variable. 1174 * @attr: device attribute, not used. 1175 * @buf: on return contains the module description text. 1176 * 1177 * Returns: size of formatted string. 1178 **/ 1179 static ssize_t 1180 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, 1181 char *buf) 1182 { 1183 struct Scsi_Host *shost = class_to_shost(dev); 1184 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1185 struct lpfc_hba *phba = vport->phba; 1186 1187 if (phba->sli_rev < LPFC_SLI_REV4) 1188 return scnprintf(buf, PAGE_SIZE, "fc\n"); 1189 1190 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { 1191 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) 1192 return scnprintf(buf, PAGE_SIZE, "fcoe\n"); 1193 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) 1194 return scnprintf(buf, PAGE_SIZE, "fc\n"); 1195 } 1196 return scnprintf(buf, PAGE_SIZE, "unknown\n"); 1197 } 1198 1199 /** 1200 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage 1201 * (OAS) is supported. 1202 * @dev: class unused variable. 1203 * @attr: device attribute, not used. 1204 * @buf: on return contains the module description text. 1205 * 1206 * Returns: size of formatted string. 1207 **/ 1208 static ssize_t 1209 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, 1210 char *buf) 1211 { 1212 struct Scsi_Host *shost = class_to_shost(dev); 1213 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 1214 struct lpfc_hba *phba = vport->phba; 1215 1216 return scnprintf(buf, PAGE_SIZE, "%d\n", 1217 phba->sli4_hba.pc_sli4_params.oas_supported); 1218 } 1219 1220 /** 1221 * lpfc_link_state_store - Transition the link_state on an HBA port 1222 * @dev: class device that is converted into a Scsi_host. 1223 * @attr: device attribute, not used. 1224 * @buf: one or more lpfc_polling_flags values. 1225 * @count: not used. 1226 * 1227 * Returns: 1228 * -EINVAL if the buffer is not "up" or "down" 1229 * return from link state change function if non-zero 1230 * length of the buf on success 1231 **/ 1232 static ssize_t 1233 lpfc_link_state_store(struct device *dev, struct device_attribute *attr, 1234 const char *buf, size_t count) 1235 { 1236 struct Scsi_Host *shost = class_to_shost(dev); 1237 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1238 struct lpfc_hba *phba = vport->phba; 1239 1240 int status = -EINVAL; 1241 1242 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && 1243 (phba->link_state == LPFC_LINK_DOWN)) 1244 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 1245 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && 1246 (phba->link_state >= LPFC_LINK_UP)) 1247 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); 1248 1249 if (status == 0) 1250 return strlen(buf); 1251 else 1252 return status; 1253 } 1254 1255 /** 1256 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports 1257 * @dev: class device that is converted into a Scsi_host. 1258 * @attr: device attribute, not used. 1259 * @buf: on return contains the sum of fc mapped and unmapped. 1260 * 1261 * Description: 1262 * Returns the ascii text number of the sum of the fc mapped and unmapped 1263 * vport counts. 1264 * 1265 * Returns: size of formatted string. 
 **/
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 vport->fc_map_cnt + vport->fc_unmap_cnt);
}

/**
 * lpfc_issue_lip - Misnomer, name carried over from long ago
 * @shost: Scsi_Host pointer.
 *
 * Description:
 * Bring the link down gracefully then re-init the link. The firmware will
 * re-init the Fibre Channel interface as required. Does not issue a LIP.
 *
 * Returns:
 * -EPERM port offline or management commands are being blocked
 * -ENOMEM cannot allocate memory for the mailbox command
 * -EIO error sending the mailbox command
 * zero for success
 **/
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus = MBXERR_ERROR;

	/*
	 * If the link is offline, disabled or BLOCK_MGMT_IO
	 * it doesn't make any sense to allow issue_lip
	 */
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (phba->hba_flag & LINK_DISABLED) ||
	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
		return -EPERM;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmboxq)
		return -ENOMEM;

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
		vport->fc_flag &= ~FC_PT2PT_NO_NVME;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);

	if ((mbxstatus == MBX_SUCCESS) &&
	    (pmboxq->u.mb.mbxStatus == 0 ||
	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
			       phba->cfg_link_speed);
		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     phba->fc_ratov * 2);
		if ((mbxstatus == MBX_SUCCESS) &&
		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"2859 SLI authentication is required "
					"for INIT_LINK but has not done yet\n");
	}

	lpfc_set_loopback_flag(phba);
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	if (mbxstatus == MBXERR_ERROR)
		return -EIO;

	return 0;
}

int
lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
{
	int cnt = 0;

	spin_lock_irq(lock);
	while (!list_empty(q)) {
		spin_unlock_irq(lock);
		msleep(20);
		if (cnt++ > 250) {	/* 5 secs */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0466 Outstanding IO when "
					"bringing Adapter offline\n");
			return 0;
		}
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
	return 1;
}
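
/*
 * Usage sketch for lpfc_emptyq_wait() (it mirrors the callers in
 * lpfc_do_offline() below; pring here stands for whichever txcmplq the
 * caller is draining):
 *
 *	if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &pring->ring_lock))
 *		goto out;	// gave up after ~5 seconds of outstanding I/O
 *
 * The lock protecting the list is passed in so the helper can drop it
 * around each msleep() and re-acquire it before re-checking the list.
 */
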
/**
 * lpfc_do_offline - Issues a mailbox command to bring the link down
 * @phba: lpfc_hba pointer.
 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
 *
 * Notes:
 * Assumes any error from lpfc_do_offline() will be negative.
 * Can wait up to 5 seconds for the port ring buffers count
 * to reach zero, prints a warning if it is not zero and continues.
 * lpfc_workq_post_event() returns zero if it could not post the event.
 *
 * Returns:
 * -ENOMEM if the event could not be posted
 * -EIO if the posted event completed with an error
 * zero for success
 **/
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
	struct completion online_compl;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	int status = 0;
	int i;
	int rc;

	init_completion(&online_compl);
	rc = lpfc_workq_post_event(phba, &status, &online_compl,
				   LPFC_EVT_OFFLINE_PREP);
	if (rc == 0)
		return -ENOMEM;

	wait_for_completion(&online_compl);

	if (status != 0)
		return -EIO;

	psli = &phba->sli;

	/*
	 * If freeing the queues has already started, don't access them.
	 * Otherwise set FREE_WAIT to indicate that queues are being used
	 * to hold the freeing process until we finish.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
		psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
	} else {
		spin_unlock_irq(&phba->hbalock);
		goto skip_wait;
	}
	spin_unlock_irq(&phba->hbalock);

	/* Wait a little for things to settle down, but not
	 * long enough for dev loss timeout to expire.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
					      &phba->hbalock))
				goto out;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
					      &pring->ring_lock))
				goto out;
		}
	}
out:
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);

skip_wait:
	init_completion(&online_compl);
	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
	if (rc == 0)
		return -ENOMEM;

	wait_for_completion(&online_compl);

	if (status != 0)
		return -EIO;

	return 0;
}
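
/*
 * The online/offline transitions in this file share one post-and-wait
 * shape; a condensed sketch (error handling trimmed to the two cases the
 * callers above and below actually distinguish):
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	if (lpfc_workq_post_event(phba, &status, &online_compl,
 *				  LPFC_EVT_ONLINE) == 0)
 *		return -ENOMEM;		// work item was never queued
 *	wait_for_completion(&online_compl);
 *	return status ? -EIO : 0;	// worker reports failure via status
 */
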
/**
 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Issues a PCI secondary bus reset for the phba->pcidev.
 *
 * Notes:
 * First walks the bus_list to ensure only PCI devices with Emulex
 * vendor id, device ids that support hot reset, only one occurrence
 * of function 0, and all ports on the bus are in offline mode to ensure the
 * hot reset only affects one valid HBA.
 *
 * Returns:
 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
 * -ENODEV, NULL ptr to pcidev
 * -EBADSLT, detected invalid device
 * -EBUSY, port is not in offline state
 * 0, successful
 */
static int
lpfc_reset_pci_bus(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	struct Scsi_Host *shost = NULL;
	struct lpfc_hba *phba_other = NULL;
	struct pci_dev *ptr = NULL;
	int res;

	if (phba->cfg_enable_hba_reset != 2)
		return -ENOTSUPP;

	if (!pdev) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
		return -ENODEV;
	}

	res = lpfc_check_pci_resettable(phba);
	if (res)
		return res;

	/* Walk the list of devices on the pci_dev's bus */
	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
		/* Check port is offline */
		shost = pci_get_drvdata(ptr);
		if (shost) {
			phba_other =
				((struct lpfc_vport *)shost->hostdata)->phba;
			if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
				lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
						"8349 WWPN = 0x%02x%02x%02x%02x"
						"%02x%02x%02x%02x is not "
						"offline!\n",
						phba_other->wwpn[0],
						phba_other->wwpn[1],
						phba_other->wwpn[2],
						phba_other->wwpn[3],
						phba_other->wwpn[4],
						phba_other->wwpn[5],
						phba_other->wwpn[6],
						phba_other->wwpn[7]);
				return -EBUSY;
			}
		}
	}

	/* Issue PCI bus reset */
	res = pci_reset_bus(pdev);
	if (res) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"8350 PCI reset bus failed: %d\n", res);
	}

	return res;
}

/**
 * lpfc_selective_reset - Offline then onlines the port
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * If the port is configured to allow a reset then the hba is brought
 * offline then online.
 *
 * Notes:
 * Assumes any error from lpfc_do_offline() will be negative.
 * Do not make this function static.
 *
 * Returns:
 * lpfc_do_offline() return code if not zero
 * -EACCES reset not configured
 * -ENOMEM error posting the online event
 * -EIO error waiting for the online event
 * zero for success
 **/
int
lpfc_selective_reset(struct lpfc_hba *phba)
{
	struct completion online_compl;
	int status = 0;
	int rc;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);

		if (status != 0)
			return status;
	}

	init_completion(&online_compl);
	rc = lpfc_workq_post_event(phba, &status, &online_compl,
				   LPFC_EVT_ONLINE);
	if (rc == 0)
		return -ENOMEM;

	wait_for_completion(&online_compl);

	if (status != 0)
		return -EIO;

	return 0;
}
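
/*
 * From userspace, lpfc_selective_reset() above is normally reached by
 * writing "selective" to the host's reset attribute, which is handled by
 * lpfc_issue_reset() below. Illustrative sketch only; the host number is
 * an example and the attribute is assumed to be exposed as "issue_reset"
 * by the DEVICE_ATTR wiring later in this file:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_host/host6/issue_reset", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("selective", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */
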
/**
 * lpfc_issue_reset - Selectively resets an adapter
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string "selective".
 * @count: unused variable.
 *
 * Description:
 * If the buf contains the string "selective" then lpfc_selective_reset()
 * is called to perform the reset.
 *
 * Notes:
 * Assumes any error from lpfc_selective_reset() will be negative.
 * If lpfc_selective_reset() returns zero then the length of the buffer
 * is returned, which indicates success.
 *
 * Returns:
 * -EINVAL if the buffer does not contain the string "selective"
 * length of buf if lpfc_selective_reset() succeeds
 * return value of lpfc_selective_reset() if the call fails
 **/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int status = -EINVAL;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
		status = phba->lpfc_selective_reset(phba);

	if (status == 0)
		return strlen(buf);
	else
		return status;
}

/**
 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * SLI4 interface type-2 device to wait on the sliport status register for
 * the readiness after performing a firmware reset.
 *
 * Returns:
 * zero for success, -EPERM when port does not have privilege to perform the
 * reset, -EIO when the port times out while recovering from the reset.
 *
 * Note:
 * As the caller will interpret the return code by value, be careful in making
 * change or addition to return codes.
 **/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
{
	struct lpfc_register portstat_reg = {0};
	int i;

	msleep(100);
	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
		       &portstat_reg.word0))
		return -EIO;

	/* verify if privileged for the request operation */
	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
		return -EPERM;

	/* wait for the SLI port firmware ready after firmware reset */
	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
		msleep(10);
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0))
			continue;
		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
			continue;
		break;
	}

	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
		return 0;
	else
		return -EIO;
}
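
/*
 * The loop above is an instance of a common bounded-poll idiom: sleep,
 * re-read the status register, and stop once every required bit is set or
 * the retry budget runs out. A generic sketch, with a hypothetical
 * read_status() and bit masks (not the lpfc register layout):
 *
 *	#define STS_READY	0x1
 *	#define STS_ERROR	0x2
 *
 *	static int wait_for_ready(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 1000; i++) {	// ~10 s at 10 ms per poll
 *			msleep(10);
 *			if ((read_status() & (STS_READY | STS_ERROR)) ==
 *			    STS_READY)
 *				return 0;	// ready and no error
 *		}
 *		return -EIO;			// never came ready
 *	}
 */
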
/**
 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
 * @phba: lpfc_hba pointer.
 * @opcode: The sli4 config command opcode.
 *
 * Description:
 * Request SLI4 interface type-2 device to perform a physical register set
 * access.
 *
 * Returns:
 * zero for success
 **/
static ssize_t
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
	struct completion online_compl;
	struct pci_dev *pdev = phba->pcidev;
	uint32_t before_fc_flag;
	uint32_t sriov_nr_virtfn;
	uint32_t reg_val;
	int status = 0, rc = 0;
	int job_posted = 1, sriov_err;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	if ((phba->sli_rev < LPFC_SLI_REV4) ||
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	     LPFC_SLI_INTF_IF_TYPE_2))
		return -EPERM;

	/* Keep state if we need to restore back */
	before_fc_flag = phba->pport->fc_flag;
	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;

	if (opcode == LPFC_FW_DUMP) {
		init_completion(&online_compl);
		phba->fw_dump_cmpl = &online_compl;
	} else {
		/* Disable SR-IOV virtual functions if enabled */
		if (phba->cfg_sriov_nr_virtfn) {
			pci_disable_sriov(pdev);
			phba->cfg_sriov_nr_virtfn = 0;
		}

		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);

		if (status != 0)
			return status;

		/* wait for the device to be quiesced before firmware reset */
		msleep(100);
	}

	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
			LPFC_CTL_PDEV_CTL_OFFSET);

	if (opcode == LPFC_FW_DUMP)
		reg_val |= LPFC_FW_DUMP_REQUEST;
	else if (opcode == LPFC_FW_RESET)
		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
	else if (opcode == LPFC_DV_RESET)
		reg_val |= LPFC_CTL_PDEV_CTL_DRST;

	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
	       LPFC_CTL_PDEV_CTL_OFFSET);
	/* flush */
	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* delay driver action following IF_TYPE_2 reset */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);

	if (rc == -EPERM) {
		/* no privilege for reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3150 No privilege to perform the requested "
				"access: x%x\n", reg_val);
	} else if (rc == -EIO) {
		/* reset failed, there is nothing more we can do */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3153 Fail to perform the requested "
				"access: x%x\n", reg_val);
		if (phba->fw_dump_cmpl)
			phba->fw_dump_cmpl = NULL;
		return rc;
	}

	/* keep the original port state */
	if (before_fc_flag & FC_OFFLINE_MODE) {
		if (phba->fw_dump_cmpl)
			phba->fw_dump_cmpl = NULL;
		goto out;
	}

	/* Firmware dump will trigger an HA_ERATT event, and
	 * lpfc_handle_eratt_s4 routine already handles bringing the port back
	 * online.
1777 */ 1778 if (opcode == LPFC_FW_DUMP) { 1779 wait_for_completion(phba->fw_dump_cmpl); 1780 } else { 1781 init_completion(&online_compl); 1782 job_posted = lpfc_workq_post_event(phba, &status, &online_compl, 1783 LPFC_EVT_ONLINE); 1784 if (!job_posted) 1785 goto out; 1786 1787 wait_for_completion(&online_compl); 1788 } 1789 out: 1790 /* in any case, restore the virtual functions enabled as before */ 1791 if (sriov_nr_virtfn) { 1792 /* If fw_dump was performed, first disable to clean up */ 1793 if (opcode == LPFC_FW_DUMP) { 1794 pci_disable_sriov(pdev); 1795 phba->cfg_sriov_nr_virtfn = 0; 1796 } 1797 1798 sriov_err = 1799 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); 1800 if (!sriov_err) 1801 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn; 1802 } 1803 1804 /* return proper error code */ 1805 if (!rc) { 1806 if (!job_posted) 1807 rc = -ENOMEM; 1808 else if (status) 1809 rc = -EIO; 1810 } 1811 return rc; 1812 } 1813 1814 /** 1815 * lpfc_nport_evt_cnt_show - Return the number of nport events 1816 * @dev: class device that is converted into a Scsi_host. 1817 * @attr: device attribute, not used. 1818 * @buf: on return contains the ascii number of nport events. 1819 * 1820 * Returns: size of formatted string. 1821 **/ 1822 static ssize_t 1823 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, 1824 char *buf) 1825 { 1826 struct Scsi_Host *shost = class_to_shost(dev); 1827 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1828 struct lpfc_hba *phba = vport->phba; 1829 1830 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 1831 } 1832 1833 static int 1834 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) 1835 { 1836 LPFC_MBOXQ_t *mbox = NULL; 1837 unsigned long val = 0; 1838 char *pval = NULL; 1839 int rc = 0; 1840 1841 if (!strncmp("enable", buff_out, 1842 strlen("enable"))) { 1843 pval = buff_out + strlen("enable") + 1; 1844 rc = kstrtoul(pval, 0, &val); 1845 if (rc) 1846 return rc; /* Invalid number */ 1847 } else if (!strncmp("disable", buff_out, 1848 strlen("disable"))) { 1849 val = 0; 1850 } else { 1851 return -EINVAL; /* Invalid command */ 1852 } 1853 1854 switch (val) { 1855 case 0: 1856 val = 0x0; /* Disable */ 1857 break; 1858 case 2: 1859 val = 0x1; /* Enable two port trunk */ 1860 break; 1861 case 4: 1862 val = 0x2; /* Enable four port trunk */ 1863 break; 1864 default: 1865 return -EINVAL; 1866 } 1867 1868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1869 "0070 Set trunk mode with val %ld ", val); 1870 1871 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1872 if (!mbox) 1873 return -ENOMEM; 1874 1875 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 1876 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, 1877 12, LPFC_SLI4_MBX_EMBED); 1878 1879 bf_set(lpfc_mbx_set_trunk_mode, 1880 &mbox->u.mqe.un.set_trunk_mode, 1881 val); 1882 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 1883 if (rc) 1884 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1885 "0071 Set trunk mode failed with status: %d", 1886 rc); 1887 mempool_free(mbox, phba->mbox_mem_pool); 1888 1889 return 0; 1890 } 1891 1892 /** 1893 * lpfc_board_mode_show - Return the state of the board 1894 * @dev: class device that is converted into a Scsi_host. 1895 * @attr: device attribute, not used. 1896 * @buf: on return contains the state of the adapter. 1897 * 1898 * Returns: size of formatted string. 
1899 **/ 1900 static ssize_t 1901 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, 1902 char *buf) 1903 { 1904 struct Scsi_Host *shost = class_to_shost(dev); 1905 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1906 struct lpfc_hba *phba = vport->phba; 1907 char * state; 1908 1909 if (phba->link_state == LPFC_HBA_ERROR) 1910 state = "error"; 1911 else if (phba->link_state == LPFC_WARM_START) 1912 state = "warm start"; 1913 else if (phba->link_state == LPFC_INIT_START) 1914 state = "offline"; 1915 else 1916 state = "online"; 1917 1918 return scnprintf(buf, PAGE_SIZE, "%s\n", state); 1919 } 1920 1921 /** 1922 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state 1923 * @dev: class device that is converted into a Scsi_host. 1924 * @attr: device attribute, not used. 1925 * @buf: containing one of the strings "online", "offline", "warm" or "error". 1926 * @count: unused variable. 1927 * 1928 * Returns: 1929 * -EACCES if enable hba reset not enabled 1930 * -EINVAL if the buffer does not contain a valid string (see above) 1931 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails 1932 * buf length greater than zero indicates success 1933 **/ 1934 static ssize_t 1935 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, 1936 const char *buf, size_t count) 1937 { 1938 struct Scsi_Host *shost = class_to_shost(dev); 1939 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1940 struct lpfc_hba *phba = vport->phba; 1941 struct completion online_compl; 1942 char *board_mode_str = NULL; 1943 int status = 0; 1944 int rc; 1945 1946 if (!phba->cfg_enable_hba_reset) { 1947 status = -EACCES; 1948 goto board_mode_out; 1949 } 1950 1951 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1952 "3050 lpfc_board_mode set to %s\n", buf); 1953 1954 init_completion(&online_compl); 1955 1956 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 1957 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1958 LPFC_EVT_ONLINE); 1959 if (rc == 0) { 1960 status = -ENOMEM; 1961 goto board_mode_out; 1962 } 1963 wait_for_completion(&online_compl); 1964 if (status) 1965 status = -EIO; 1966 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 1967 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1968 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) 1969 if (phba->sli_rev == LPFC_SLI_REV4) 1970 status = -EINVAL; 1971 else 1972 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); 1973 else if (strncmp(buf, "error", sizeof("error") - 1) == 0) 1974 if (phba->sli_rev == LPFC_SLI_REV4) 1975 status = -EINVAL; 1976 else 1977 status = lpfc_do_offline(phba, LPFC_EVT_KILL); 1978 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0) 1979 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP); 1980 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0) 1981 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); 1982 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) 1983 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); 1984 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1) 1985 == 0) 1986 status = lpfc_reset_pci_bus(phba); 1987 else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0) 1988 lpfc_issue_hb_tmo(phba); 1989 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0) 1990 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk")); 1991 else 1992 status = -EINVAL; 1993 1994 board_mode_out: 1995 if (!status) 1996 return strlen(buf); 1997 else { 
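/* Strip a trailing newline so the failed request logs cleanly */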
1998 board_mode_str = strchr(buf, '\n'); 1999 if (board_mode_str) 2000 *board_mode_str = '\0'; 2001 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2002 "3097 Failed \"%s\", status(%d), " 2003 "fc_flag(x%x)\n", 2004 buf, status, phba->pport->fc_flag); 2005 return status; 2006 } 2007 } 2008 2009 /** 2010 * lpfc_get_hba_info - Return various bits of informaton about the adapter 2011 * @phba: pointer to the adapter structure. 2012 * @mxri: max xri count. 2013 * @axri: available xri count. 2014 * @mrpi: max rpi count. 2015 * @arpi: available rpi count. 2016 * @mvpi: max vpi count. 2017 * @avpi: available vpi count. 2018 * 2019 * Description: 2020 * If an integer pointer for an count is not null then the value for the 2021 * count is returned. 2022 * 2023 * Returns: 2024 * zero on error 2025 * one for success 2026 **/ 2027 static int 2028 lpfc_get_hba_info(struct lpfc_hba *phba, 2029 uint32_t *mxri, uint32_t *axri, 2030 uint32_t *mrpi, uint32_t *arpi, 2031 uint32_t *mvpi, uint32_t *avpi) 2032 { 2033 struct lpfc_mbx_read_config *rd_config; 2034 LPFC_MBOXQ_t *pmboxq; 2035 MAILBOX_t *pmb; 2036 int rc = 0; 2037 uint32_t max_vpi; 2038 2039 /* 2040 * prevent udev from issuing mailbox commands until the port is 2041 * configured. 2042 */ 2043 if (phba->link_state < LPFC_LINK_DOWN || 2044 !phba->mbox_mem_pool || 2045 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 2046 return 0; 2047 2048 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 2049 return 0; 2050 2051 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2052 if (!pmboxq) 2053 return 0; 2054 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 2055 2056 pmb = &pmboxq->u.mb; 2057 pmb->mbxCommand = MBX_READ_CONFIG; 2058 pmb->mbxOwner = OWN_HOST; 2059 pmboxq->ctx_buf = NULL; 2060 2061 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2062 rc = MBX_NOT_FINISHED; 2063 else 2064 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2065 2066 if (rc != MBX_SUCCESS) { 2067 if (rc != MBX_TIMEOUT) 2068 mempool_free(pmboxq, phba->mbox_mem_pool); 2069 return 0; 2070 } 2071 2072 if (phba->sli_rev == LPFC_SLI_REV4) { 2073 rd_config = &pmboxq->u.mqe.un.rd_config; 2074 if (mrpi) 2075 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 2076 if (arpi) 2077 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - 2078 phba->sli4_hba.max_cfg_param.rpi_used; 2079 if (mxri) 2080 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 2081 if (axri) 2082 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - 2083 phba->sli4_hba.max_cfg_param.xri_used; 2084 2085 /* Account for differences with SLI-3. Get vpi count from 2086 * mailbox data and subtract one for max vpi value. 2087 */ 2088 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 
2089 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; 2090 2091 /* Limit the max we support */ 2092 if (max_vpi > LPFC_MAX_VPI) 2093 max_vpi = LPFC_MAX_VPI; 2094 if (mvpi) 2095 *mvpi = max_vpi; 2096 if (avpi) 2097 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used; 2098 } else { 2099 if (mrpi) 2100 *mrpi = pmb->un.varRdConfig.max_rpi; 2101 if (arpi) 2102 *arpi = pmb->un.varRdConfig.avail_rpi; 2103 if (mxri) 2104 *mxri = pmb->un.varRdConfig.max_xri; 2105 if (axri) 2106 *axri = pmb->un.varRdConfig.avail_xri; 2107 if (mvpi) 2108 *mvpi = pmb->un.varRdConfig.max_vpi; 2109 if (avpi) { 2110 /* avail_vpi is only valid if link is up and ready */ 2111 if (phba->link_state == LPFC_HBA_READY) 2112 *avpi = pmb->un.varRdConfig.avail_vpi; 2113 else 2114 *avpi = pmb->un.varRdConfig.max_vpi; 2115 } 2116 } 2117 2118 mempool_free(pmboxq, phba->mbox_mem_pool); 2119 return 1; 2120 } 2121 2122 /** 2123 * lpfc_max_rpi_show - Return maximum rpi 2124 * @dev: class device that is converted into a Scsi_host. 2125 * @attr: device attribute, not used. 2126 * @buf: on return contains the maximum rpi count in decimal or "Unknown". 2127 * 2128 * Description: 2129 * Calls lpfc_get_hba_info() asking for just the mrpi count. 2130 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2131 * to "Unknown" and the buffer length is returned, therefore the caller 2132 * must check for "Unknown" in the buffer to detect a failure. 2133 * 2134 * Returns: size of formatted string. 2135 **/ 2136 static ssize_t 2137 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, 2138 char *buf) 2139 { 2140 struct Scsi_Host *shost = class_to_shost(dev); 2141 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2142 struct lpfc_hba *phba = vport->phba; 2143 uint32_t cnt; 2144 2145 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) 2146 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); 2147 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2148 } 2149 2150 /** 2151 * lpfc_used_rpi_show - Return maximum rpi minus available rpi 2152 * @dev: class device that is converted into a Scsi_host. 2153 * @attr: device attribute, not used. 2154 * @buf: containing the used rpi count in decimal or "Unknown". 2155 * 2156 * Description: 2157 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. 2158 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2159 * to "Unknown" and the buffer length is returned, therefore the caller 2160 * must check for "Unknown" in the buffer to detect a failure. 2161 * 2162 * Returns: size of formatted string. 2163 **/ 2164 static ssize_t 2165 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, 2166 char *buf) 2167 { 2168 struct Scsi_Host *shost = class_to_shost(dev); 2169 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2170 struct lpfc_hba *phba = vport->phba; 2171 uint32_t cnt, acnt; 2172 2173 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) 2174 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2175 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2176 } 2177 2178 /** 2179 * lpfc_max_xri_show - Return maximum xri 2180 * @dev: class device that is converted into a Scsi_host. 2181 * @attr: device attribute, not used. 2182 * @buf: on return contains the maximum xri count in decimal or "Unknown". 2183 * 2184 * Description: 2185 * Calls lpfc_get_hba_info() asking for just the mrpi count. 
2186 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2187 * to "Unknown" and the buffer length is returned, therefore the caller 2188 * must check for "Unknown" in the buffer to detect a failure. 2189 * 2190 * Returns: size of formatted string. 2191 **/ 2192 static ssize_t 2193 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, 2194 char *buf) 2195 { 2196 struct Scsi_Host *shost = class_to_shost(dev); 2197 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2198 struct lpfc_hba *phba = vport->phba; 2199 uint32_t cnt; 2200 2201 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) 2202 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); 2203 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2204 } 2205 2206 /** 2207 * lpfc_used_xri_show - Return maximum xpi minus the available xpi 2208 * @dev: class device that is converted into a Scsi_host. 2209 * @attr: device attribute, not used. 2210 * @buf: on return contains the used xri count in decimal or "Unknown". 2211 * 2212 * Description: 2213 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. 2214 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2215 * to "Unknown" and the buffer length is returned, therefore the caller 2216 * must check for "Unknown" in the buffer to detect a failure. 2217 * 2218 * Returns: size of formatted string. 2219 **/ 2220 static ssize_t 2221 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, 2222 char *buf) 2223 { 2224 struct Scsi_Host *shost = class_to_shost(dev); 2225 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2226 struct lpfc_hba *phba = vport->phba; 2227 uint32_t cnt, acnt; 2228 2229 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) 2230 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2231 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2232 } 2233 2234 /** 2235 * lpfc_max_vpi_show - Return maximum vpi 2236 * @dev: class device that is converted into a Scsi_host. 2237 * @attr: device attribute, not used. 2238 * @buf: on return contains the maximum vpi count in decimal or "Unknown". 2239 * 2240 * Description: 2241 * Calls lpfc_get_hba_info() asking for just the mvpi count. 2242 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2243 * to "Unknown" and the buffer length is returned, therefore the caller 2244 * must check for "Unknown" in the buffer to detect a failure. 2245 * 2246 * Returns: size of formatted string. 2247 **/ 2248 static ssize_t 2249 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, 2250 char *buf) 2251 { 2252 struct Scsi_Host *shost = class_to_shost(dev); 2253 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2254 struct lpfc_hba *phba = vport->phba; 2255 uint32_t cnt; 2256 2257 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) 2258 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); 2259 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2260 } 2261 2262 /** 2263 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi 2264 * @dev: class device that is converted into a Scsi_host. 2265 * @attr: device attribute, not used. 2266 * @buf: on return contains the used vpi count in decimal or "Unknown". 2267 * 2268 * Description: 2269 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. 
2270 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2271 * to "Unknown" and the buffer length is returned, therefore the caller 2272 * must check for "Unknown" in the buffer to detect a failure. 2273 * 2274 * Returns: size of formatted string. 2275 **/ 2276 static ssize_t 2277 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, 2278 char *buf) 2279 { 2280 struct Scsi_Host *shost = class_to_shost(dev); 2281 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2282 struct lpfc_hba *phba = vport->phba; 2283 uint32_t cnt, acnt; 2284 2285 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) 2286 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2287 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2288 } 2289 2290 /** 2291 * lpfc_npiv_info_show - Return text about NPIV support for the adapter 2292 * @dev: class device that is converted into a Scsi_host. 2293 * @attr: device attribute, not used. 2294 * @buf: text that must be interpreted to determine if npiv is supported. 2295 * 2296 * Description: 2297 * Buffer will contain text indicating npiv is not suppoerted on the port, 2298 * the port is an NPIV physical port, or it is an npiv virtual port with 2299 * the id of the vport. 2300 * 2301 * Returns: size of formatted string. 2302 **/ 2303 static ssize_t 2304 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, 2305 char *buf) 2306 { 2307 struct Scsi_Host *shost = class_to_shost(dev); 2308 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2309 struct lpfc_hba *phba = vport->phba; 2310 2311 if (!(phba->max_vpi)) 2312 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); 2313 if (vport->port_type == LPFC_PHYSICAL_PORT) 2314 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); 2315 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); 2316 } 2317 2318 /** 2319 * lpfc_poll_show - Return text about poll support for the adapter 2320 * @dev: class device that is converted into a Scsi_host. 2321 * @attr: device attribute, not used. 2322 * @buf: on return contains the cfg_poll in hex. 2323 * 2324 * Notes: 2325 * cfg_poll should be a lpfc_polling_flags type. 2326 * 2327 * Returns: size of formatted string. 2328 **/ 2329 static ssize_t 2330 lpfc_poll_show(struct device *dev, struct device_attribute *attr, 2331 char *buf) 2332 { 2333 struct Scsi_Host *shost = class_to_shost(dev); 2334 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2335 struct lpfc_hba *phba = vport->phba; 2336 2337 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 2338 } 2339 2340 /** 2341 * lpfc_poll_store - Set the value of cfg_poll for the adapter 2342 * @dev: class device that is converted into a Scsi_host. 2343 * @attr: device attribute, not used. 2344 * @buf: one or more lpfc_polling_flags values. 2345 * @count: not used. 2346 * 2347 * Notes: 2348 * buf contents converted to integer and checked for a valid value. 
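 * Only combinations of ENABLE_FCP_RING_POLLING and DISABLE_FCP_RING_INT
 * (the low two bits) are accepted; on SLI-4 adapters the value is forced
 * to zero.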
2349 * 2350 * Returns: 2351 * -EINVAL if the buffer connot be converted or is out of range 2352 * length of the buf on success 2353 **/ 2354 static ssize_t 2355 lpfc_poll_store(struct device *dev, struct device_attribute *attr, 2356 const char *buf, size_t count) 2357 { 2358 struct Scsi_Host *shost = class_to_shost(dev); 2359 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2360 struct lpfc_hba *phba = vport->phba; 2361 uint32_t creg_val; 2362 uint32_t old_val; 2363 int val=0; 2364 2365 if (!isdigit(buf[0])) 2366 return -EINVAL; 2367 2368 if (sscanf(buf, "%i", &val) != 1) 2369 return -EINVAL; 2370 2371 if ((val & 0x3) != val) 2372 return -EINVAL; 2373 2374 if (phba->sli_rev == LPFC_SLI_REV4) 2375 val = 0; 2376 2377 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2378 "3051 lpfc_poll changed from %d to %d\n", 2379 phba->cfg_poll, val); 2380 2381 spin_lock_irq(&phba->hbalock); 2382 2383 old_val = phba->cfg_poll; 2384 2385 if (val & ENABLE_FCP_RING_POLLING) { 2386 if ((val & DISABLE_FCP_RING_INT) && 2387 !(old_val & DISABLE_FCP_RING_INT)) { 2388 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2389 spin_unlock_irq(&phba->hbalock); 2390 return -EINVAL; 2391 } 2392 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 2393 writel(creg_val, phba->HCregaddr); 2394 readl(phba->HCregaddr); /* flush */ 2395 2396 lpfc_poll_start_timer(phba); 2397 } 2398 } else if (val != 0x0) { 2399 spin_unlock_irq(&phba->hbalock); 2400 return -EINVAL; 2401 } 2402 2403 if (!(val & DISABLE_FCP_RING_INT) && 2404 (old_val & DISABLE_FCP_RING_INT)) 2405 { 2406 spin_unlock_irq(&phba->hbalock); 2407 del_timer(&phba->fcp_poll_timer); 2408 spin_lock_irq(&phba->hbalock); 2409 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2410 spin_unlock_irq(&phba->hbalock); 2411 return -EINVAL; 2412 } 2413 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 2414 writel(creg_val, phba->HCregaddr); 2415 readl(phba->HCregaddr); /* flush */ 2416 } 2417 2418 phba->cfg_poll = val; 2419 2420 spin_unlock_irq(&phba->hbalock); 2421 2422 return strlen(buf); 2423 } 2424 2425 /** 2426 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions 2427 * @dev: class converted to a Scsi_host structure. 2428 * @attr: device attribute, not used. 2429 * @buf: on return contains the formatted support level. 2430 * 2431 * Description: 2432 * Returns the maximum number of virtual functions a physical function can 2433 * support, 0 will be returned if called on virtual function. 2434 * 2435 * Returns: size of formatted string. 2436 **/ 2437 static ssize_t 2438 lpfc_sriov_hw_max_virtfn_show(struct device *dev, 2439 struct device_attribute *attr, 2440 char *buf) 2441 { 2442 struct Scsi_Host *shost = class_to_shost(dev); 2443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2444 struct lpfc_hba *phba = vport->phba; 2445 uint16_t max_nr_virtfn; 2446 2447 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); 2448 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); 2449 } 2450 2451 /** 2452 * lpfc_enable_bbcr_set: Sets an attribute value. 2453 * @phba: pointer the the adapter structure. 2454 * @val: integer attribute value. 2455 * 2456 * Description: 2457 * Validates the min and max values then sets the 2458 * adapter config field if in the valid range. prints error message 2459 * and does not set the parameter if invalid. 
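 * Note that BBCR (BB Credit Recovery) can only be enabled on SLI-4
 * adapters; the request is rejected on earlier SLI revisions.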
2460 * 2461 * Returns: 2462 * zero on success 2463 * -EINVAL if val is invalid 2464 */ 2465 static ssize_t 2466 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) 2467 { 2468 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { 2469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2470 "3068 lpfc_enable_bbcr changed from %d to " 2471 "%d\n", phba->cfg_enable_bbcr, val); 2472 phba->cfg_enable_bbcr = val; 2473 return 0; 2474 } 2475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2476 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, " 2477 "1\n", val); 2478 return -EINVAL; 2479 } 2480 2481 /* 2482 * lpfc_param_show - Return a cfg attribute value in decimal 2483 * 2484 * Description: 2485 * Macro that given an attr e.g. hba_queue_depth expands 2486 * into a function with the name lpfc_hba_queue_depth_show. 2487 * 2488 * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. 2489 * @dev: class device that is converted into a Scsi_host. 2490 * @attr: device attribute, not used. 2491 * @buf: on return contains the attribute value in decimal. 2492 * 2493 * Returns: size of formatted string. 2494 **/ 2495 #define lpfc_param_show(attr) \ 2496 static ssize_t \ 2497 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2498 char *buf) \ 2499 { \ 2500 struct Scsi_Host *shost = class_to_shost(dev);\ 2501 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2502 struct lpfc_hba *phba = vport->phba;\ 2503 return scnprintf(buf, PAGE_SIZE, "%d\n",\ 2504 phba->cfg_##attr);\ 2505 } 2506 2507 /* 2508 * lpfc_param_hex_show - Return a cfg attribute value in hex 2509 * 2510 * Description: 2511 * Macro that given an attr e.g. hba_queue_depth expands 2512 * into a function with the name lpfc_hba_queue_depth_show 2513 * 2514 * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. 2515 * @dev: class device that is converted into a Scsi_host. 2516 * @attr: device attribute, not used. 2517 * @buf: on return contains the attribute value in hexadecimal. 2518 * 2519 * Returns: size of formatted string. 2520 **/ 2521 #define lpfc_param_hex_show(attr) \ 2522 static ssize_t \ 2523 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2524 char *buf) \ 2525 { \ 2526 struct Scsi_Host *shost = class_to_shost(dev);\ 2527 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2528 struct lpfc_hba *phba = vport->phba;\ 2529 uint val = 0;\ 2530 val = phba->cfg_##attr;\ 2531 return scnprintf(buf, PAGE_SIZE, "%#x\n",\ 2532 phba->cfg_##attr);\ 2533 } 2534 2535 /* 2536 * lpfc_param_init - Initializes a cfg attribute 2537 * 2538 * Description: 2539 * Macro that given an attr e.g. hba_queue_depth expands 2540 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2541 * takes a default argument, a minimum and maximum argument. 2542 * 2543 * lpfc_##attr##_init: Initializes an attribute. 2544 * @phba: pointer the the adapter structure. 2545 * @val: integer attribute value. 2546 * 2547 * Validates the min and max values then sets the adapter config field 2548 * accordingly, or uses the default if out of range and prints an error message. 
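 * For example, lpfc_param_init(enable_npiv, 1, 0, 1) generates
 * lpfc_enable_npiv_init(), which accepts 0 or 1 and falls back to the
 * default of 1 for any other value.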
2549 * 2550 * Returns: 2551 * zero on success 2552 * -EINVAL if default used 2553 **/ 2554 #define lpfc_param_init(attr, default, minval, maxval) \ 2555 static int \ 2556 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ 2557 { \ 2558 if (lpfc_rangecheck(val, minval, maxval)) {\ 2559 phba->cfg_##attr = val;\ 2560 return 0;\ 2561 }\ 2562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2563 "0449 lpfc_"#attr" attribute cannot be set to %d, "\ 2564 "allowed range is ["#minval", "#maxval"]\n", val); \ 2565 phba->cfg_##attr = default;\ 2566 return -EINVAL;\ 2567 } 2568 2569 /* 2570 * lpfc_param_set - Set a cfg attribute value 2571 * 2572 * Description: 2573 * Macro that given an attr e.g. hba_queue_depth expands 2574 * into a function with the name lpfc_hba_queue_depth_set 2575 * 2576 * lpfc_##attr##_set: Sets an attribute value. 2577 * @phba: pointer the the adapter structure. 2578 * @val: integer attribute value. 2579 * 2580 * Description: 2581 * Validates the min and max values then sets the 2582 * adapter config field if in the valid range. prints error message 2583 * and does not set the parameter if invalid. 2584 * 2585 * Returns: 2586 * zero on success 2587 * -EINVAL if val is invalid 2588 **/ 2589 #define lpfc_param_set(attr, default, minval, maxval) \ 2590 static int \ 2591 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 2592 { \ 2593 if (lpfc_rangecheck(val, minval, maxval)) {\ 2594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2595 "3052 lpfc_" #attr " changed from %d to %d\n", \ 2596 phba->cfg_##attr, val); \ 2597 phba->cfg_##attr = val;\ 2598 return 0;\ 2599 }\ 2600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2601 "0450 lpfc_"#attr" attribute cannot be set to %d, "\ 2602 "allowed range is ["#minval", "#maxval"]\n", val); \ 2603 return -EINVAL;\ 2604 } 2605 2606 /* 2607 * lpfc_param_store - Set a vport attribute value 2608 * 2609 * Description: 2610 * Macro that given an attr e.g. hba_queue_depth expands 2611 * into a function with the name lpfc_hba_queue_depth_store. 2612 * 2613 * lpfc_##attr##_store: Set an sttribute value. 2614 * @dev: class device that is converted into a Scsi_host. 2615 * @attr: device attribute, not used. 2616 * @buf: contains the attribute value in ascii. 2617 * @count: not used. 2618 * 2619 * Description: 2620 * Convert the ascii text number to an integer, then 2621 * use the lpfc_##attr##_set function to set the value. 2622 * 2623 * Returns: 2624 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2625 * length of buffer upon success. 2626 **/ 2627 #define lpfc_param_store(attr) \ 2628 static ssize_t \ 2629 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2630 const char *buf, size_t count) \ 2631 { \ 2632 struct Scsi_Host *shost = class_to_shost(dev);\ 2633 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2634 struct lpfc_hba *phba = vport->phba;\ 2635 uint val = 0;\ 2636 if (!isdigit(buf[0]))\ 2637 return -EINVAL;\ 2638 if (sscanf(buf, "%i", &val) != 1)\ 2639 return -EINVAL;\ 2640 if (lpfc_##attr##_set(phba, val) == 0) \ 2641 return strlen(buf);\ 2642 else \ 2643 return -EINVAL;\ 2644 } 2645 2646 /* 2647 * lpfc_vport_param_show - Return decimal formatted cfg attribute value 2648 * 2649 * Description: 2650 * Macro that given an attr e.g. hba_queue_depth expands 2651 * into a function with the name lpfc_hba_queue_depth_show 2652 * 2653 * lpfc_##attr##_show: prints the attribute value in decimal. 2654 * @dev: class device that is converted into a Scsi_host. 2655 * @attr: device attribute, not used. 
2656 * @buf: on return contains the attribute value in decimal. 2657 * 2658 * Returns: length of formatted string. 2659 **/ 2660 #define lpfc_vport_param_show(attr) \ 2661 static ssize_t \ 2662 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2663 char *buf) \ 2664 { \ 2665 struct Scsi_Host *shost = class_to_shost(dev);\ 2666 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2667 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 2668 } 2669 2670 /* 2671 * lpfc_vport_param_hex_show - Return hex formatted attribute value 2672 * 2673 * Description: 2674 * Macro that given an attr e.g. 2675 * hba_queue_depth expands into a function with the name 2676 * lpfc_hba_queue_depth_show 2677 * 2678 * lpfc_##attr##_show: prints the attribute value in hexadecimal. 2679 * @dev: class device that is converted into a Scsi_host. 2680 * @attr: device attribute, not used. 2681 * @buf: on return contains the attribute value in hexadecimal. 2682 * 2683 * Returns: length of formatted string. 2684 **/ 2685 #define lpfc_vport_param_hex_show(attr) \ 2686 static ssize_t \ 2687 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2688 char *buf) \ 2689 { \ 2690 struct Scsi_Host *shost = class_to_shost(dev);\ 2691 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2692 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 2693 } 2694 2695 /* 2696 * lpfc_vport_param_init - Initialize a vport cfg attribute 2697 * 2698 * Description: 2699 * Macro that given an attr e.g. hba_queue_depth expands 2700 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2701 * takes a default argument, a minimum and maximum argument. 2702 * 2703 * lpfc_##attr##_init: validates the min and max values then sets the 2704 * adapter config field accordingly, or uses the default if out of range 2705 * and prints an error message. 2706 * @phba: pointer the the adapter structure. 2707 * @val: integer attribute value. 2708 * 2709 * Returns: 2710 * zero on success 2711 * -EINVAL if default used 2712 **/ 2713 #define lpfc_vport_param_init(attr, default, minval, maxval) \ 2714 static int \ 2715 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ 2716 { \ 2717 if (lpfc_rangecheck(val, minval, maxval)) {\ 2718 vport->cfg_##attr = val;\ 2719 return 0;\ 2720 }\ 2721 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2722 "0423 lpfc_"#attr" attribute cannot be set to %d, "\ 2723 "allowed range is ["#minval", "#maxval"]\n", val); \ 2724 vport->cfg_##attr = default;\ 2725 return -EINVAL;\ 2726 } 2727 2728 /* 2729 * lpfc_vport_param_set - Set a vport cfg attribute 2730 * 2731 * Description: 2732 * Macro that given an attr e.g. hba_queue_depth expands 2733 * into a function with the name lpfc_hba_queue_depth_set 2734 * 2735 * lpfc_##attr##_set: validates the min and max values then sets the 2736 * adapter config field if in the valid range. prints error message 2737 * and does not set the parameter if invalid. 2738 * @phba: pointer the the adapter structure. 2739 * @val: integer attribute value. 
2740 * 2741 * Returns: 2742 * zero on success 2743 * -EINVAL if val is invalid 2744 **/ 2745 #define lpfc_vport_param_set(attr, default, minval, maxval) \ 2746 static int \ 2747 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 2748 { \ 2749 if (lpfc_rangecheck(val, minval, maxval)) {\ 2750 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2751 "3053 lpfc_" #attr \ 2752 " changed from %d (x%x) to %d (x%x)\n", \ 2753 vport->cfg_##attr, vport->cfg_##attr, \ 2754 val, val); \ 2755 vport->cfg_##attr = val;\ 2756 return 0;\ 2757 }\ 2758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2759 "0424 lpfc_"#attr" attribute cannot be set to %d, "\ 2760 "allowed range is ["#minval", "#maxval"]\n", val); \ 2761 return -EINVAL;\ 2762 } 2763 2764 /* 2765 * lpfc_vport_param_store - Set a vport attribute 2766 * 2767 * Description: 2768 * Macro that given an attr e.g. hba_queue_depth 2769 * expands into a function with the name lpfc_hba_queue_depth_store 2770 * 2771 * lpfc_##attr##_store: convert the ascii text number to an integer, then 2772 * use the lpfc_##attr##_set function to set the value. 2773 * @cdev: class device that is converted into a Scsi_host. 2774 * @buf: contains the attribute value in decimal. 2775 * @count: not used. 2776 * 2777 * Returns: 2778 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2779 * length of buffer upon success. 2780 **/ 2781 #define lpfc_vport_param_store(attr) \ 2782 static ssize_t \ 2783 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2784 const char *buf, size_t count) \ 2785 { \ 2786 struct Scsi_Host *shost = class_to_shost(dev);\ 2787 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2788 uint val = 0;\ 2789 if (!isdigit(buf[0]))\ 2790 return -EINVAL;\ 2791 if (sscanf(buf, "%i", &val) != 1)\ 2792 return -EINVAL;\ 2793 if (lpfc_##attr##_set(vport, val) == 0) \ 2794 return strlen(buf);\ 2795 else \ 2796 return -EINVAL;\ 2797 } 2798 2799 2800 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); 2801 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); 2802 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); 2803 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); 2804 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); 2805 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); 2806 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); 2807 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); 2808 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 2809 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); 2810 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); 2811 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 2812 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 2813 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 2814 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, 2815 lpfc_link_state_store); 2816 static DEVICE_ATTR(option_rom_version, S_IRUGO, 2817 lpfc_option_rom_version_show, NULL); 2818 static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 2819 lpfc_num_discovered_ports_show, NULL); 2820 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); 2821 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 2822 static DEVICE_ATTR_RO(lpfc_drvr_version); 2823 static DEVICE_ATTR_RO(lpfc_enable_fip); 2824 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 2825 
lpfc_board_mode_show, lpfc_board_mode_store); 2826 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 2827 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); 2828 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); 2829 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); 2830 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); 2831 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); 2832 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); 2833 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); 2834 static DEVICE_ATTR_RO(lpfc_temp_sensor); 2835 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); 2836 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2837 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, 2838 NULL); 2839 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); 2840 2841 #define WWN_SZ 8 2842 /** 2843 * lpfc_wwn_set - Convert string to the 8 byte WWN value. 2844 * @buf: WWN string. 2845 * @cnt: Length of string. 2846 * @wwn: Array to receive converted wwn value. 2847 * 2848 * Returns: 2849 * -EINVAL if the buffer does not contain a valid wwn 2850 * 0 success 2851 **/ 2852 static size_t 2853 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) 2854 { 2855 unsigned int i, j; 2856 2857 /* Count may include a LF at end of string */ 2858 if (buf[cnt-1] == '\n') 2859 cnt--; 2860 2861 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || 2862 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) 2863 return -EINVAL; 2864 2865 memset(wwn, 0, WWN_SZ); 2866 2867 /* Validate and store the new name */ 2868 for (i = 0, j = 0; i < 16; i++) { 2869 if ((*buf >= 'a') && (*buf <= 'f')) 2870 j = ((j << 4) | ((*buf++ - 'a') + 10)); 2871 else if ((*buf >= 'A') && (*buf <= 'F')) 2872 j = ((j << 4) | ((*buf++ - 'A') + 10)); 2873 else if ((*buf >= '0') && (*buf <= '9')) 2874 j = ((j << 4) | (*buf++ - '0')); 2875 else 2876 return -EINVAL; 2877 if (i % 2) { 2878 wwn[i/2] = j & 0xff; 2879 j = 0; 2880 } 2881 } 2882 return 0; 2883 } 2884 2885 2886 /** 2887 * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for 2888 * Optimized Access Storage (OAS) operations. 2889 * @dev: class device that is converted into a Scsi_host. 2890 * @attr: device attribute, not used. 2891 * @buf: buffer for passing information. 2892 * 2893 * Returns: 2894 * value of count 2895 **/ 2896 static ssize_t 2897 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, 2898 char *buf) 2899 { 2900 struct Scsi_Host *shost = class_to_shost(dev); 2901 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2902 2903 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 2904 wwn_to_u64(phba->cfg_oas_tgt_wwpn)); 2905 } 2906 2907 /** 2908 * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for 2909 * Optimized Access Storage (OAS) operations. 2910 * @dev: class device that is converted into a Scsi_host. 2911 * @attr: device attribute, not used. 2912 * @buf: buffer for passing information. 2913 * @count: Size of the data buffer. 
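 * The wwpn is written as 16 hex digits, optionally prefixed with "x" or
 * "0x", e.g. echo 0x21000024ff454321 > lpfc_xlane_tgt (value is purely
 * illustrative). Writing a zero wwpn selects any target
 * (OAS_FIND_ANY_TARGET).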
2914 * 2915 * Returns: 2916 * -EINVAL count is invalid, invalid wwpn byte invalid 2917 * -EPERM oas is not supported by hba 2918 * value of count on success 2919 **/ 2920 static ssize_t 2921 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, 2922 const char *buf, size_t count) 2923 { 2924 struct Scsi_Host *shost = class_to_shost(dev); 2925 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2926 unsigned int cnt = count; 2927 uint8_t wwpn[WWN_SZ]; 2928 int rc; 2929 2930 if (!phba->cfg_fof) 2931 return -EPERM; 2932 2933 /* count may include a LF at end of string */ 2934 if (buf[cnt-1] == '\n') 2935 cnt--; 2936 2937 rc = lpfc_wwn_set(buf, cnt, wwpn); 2938 if (rc) 2939 return rc; 2940 2941 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); 2942 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); 2943 if (wwn_to_u64(wwpn) == 0) 2944 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; 2945 else 2946 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; 2947 phba->cfg_oas_flags &= ~OAS_LUN_VALID; 2948 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; 2949 return count; 2950 } 2951 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, 2952 lpfc_oas_tgt_show, lpfc_oas_tgt_store); 2953 2954 /** 2955 * lpfc_oas_priority_show - Return wwpn of target whose luns maybe enabled for 2956 * Optimized Access Storage (OAS) operations. 2957 * @dev: class device that is converted into a Scsi_host. 2958 * @attr: device attribute, not used. 2959 * @buf: buffer for passing information. 2960 * 2961 * Returns: 2962 * value of count 2963 **/ 2964 static ssize_t 2965 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, 2966 char *buf) 2967 { 2968 struct Scsi_Host *shost = class_to_shost(dev); 2969 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2970 2971 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); 2972 } 2973 2974 /** 2975 * lpfc_oas_priority_store - Store wwpn of target whose luns maybe enabled for 2976 * Optimized Access Storage (OAS) operations. 2977 * @dev: class device that is converted into a Scsi_host. 2978 * @attr: device attribute, not used. 2979 * @buf: buffer for passing information. 2980 * @count: Size of the data buffer. 2981 * 2982 * Returns: 2983 * -EINVAL count is invalid, invalid wwpn byte invalid 2984 * -EPERM oas is not supported by hba 2985 * value of count on success 2986 **/ 2987 static ssize_t 2988 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, 2989 const char *buf, size_t count) 2990 { 2991 struct Scsi_Host *shost = class_to_shost(dev); 2992 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2993 unsigned int cnt = count; 2994 unsigned long val; 2995 int ret; 2996 2997 if (!phba->cfg_fof) 2998 return -EPERM; 2999 3000 /* count may include a LF at end of string */ 3001 if (buf[cnt-1] == '\n') 3002 cnt--; 3003 3004 ret = kstrtoul(buf, 0, &val); 3005 if (ret || (val > 0x7f)) 3006 return -EINVAL; 3007 3008 if (val) 3009 phba->cfg_oas_priority = (uint8_t)val; 3010 else 3011 phba->cfg_oas_priority = phba->cfg_XLanePriority; 3012 return count; 3013 } 3014 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, 3015 lpfc_oas_priority_show, lpfc_oas_priority_store); 3016 3017 /** 3018 * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled 3019 * for Optimized Access Storage (OAS) operations. 3020 * @dev: class device that is converted into a Scsi_host. 3021 * @attr: device attribute, not used. 
3022 * @buf: buffer for passing information. 3023 * 3024 * Returns: 3025 * value of count on success 3026 **/ 3027 static ssize_t 3028 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, 3029 char *buf) 3030 { 3031 struct Scsi_Host *shost = class_to_shost(dev); 3032 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3033 3034 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 3035 wwn_to_u64(phba->cfg_oas_vpt_wwpn)); 3036 } 3037 3038 /** 3039 * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled 3040 * for Optimized Access Storage (OAS) operations. 3041 * @dev: class device that is converted into a Scsi_host. 3042 * @attr: device attribute, not used. 3043 * @buf: buffer for passing information. 3044 * @count: Size of the data buffer. 3045 * 3046 * Returns: 3047 * -EINVAL count is invalid, invalid wwpn byte invalid 3048 * -EPERM oas is not supported by hba 3049 * value of count on success 3050 **/ 3051 static ssize_t 3052 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, 3053 const char *buf, size_t count) 3054 { 3055 struct Scsi_Host *shost = class_to_shost(dev); 3056 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3057 unsigned int cnt = count; 3058 uint8_t wwpn[WWN_SZ]; 3059 int rc; 3060 3061 if (!phba->cfg_fof) 3062 return -EPERM; 3063 3064 /* count may include a LF at end of string */ 3065 if (buf[cnt-1] == '\n') 3066 cnt--; 3067 3068 rc = lpfc_wwn_set(buf, cnt, wwpn); 3069 if (rc) 3070 return rc; 3071 3072 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3073 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3074 if (wwn_to_u64(wwpn) == 0) 3075 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; 3076 else 3077 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; 3078 phba->cfg_oas_flags &= ~OAS_LUN_VALID; 3079 if (phba->cfg_oas_priority == 0) 3080 phba->cfg_oas_priority = phba->cfg_XLanePriority; 3081 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; 3082 return count; 3083 } 3084 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, 3085 lpfc_oas_vpt_show, lpfc_oas_vpt_store); 3086 3087 /** 3088 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) 3089 * of whether luns will be enabled or disabled 3090 * for Optimized Access Storage (OAS) operations. 3091 * @dev: class device that is converted into a Scsi_host. 3092 * @attr: device attribute, not used. 3093 * @buf: buffer for passing information. 3094 * 3095 * Returns: 3096 * size of formatted string. 3097 **/ 3098 static ssize_t 3099 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, 3100 char *buf) 3101 { 3102 struct Scsi_Host *shost = class_to_shost(dev); 3103 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3104 3105 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); 3106 } 3107 3108 /** 3109 * lpfc_oas_lun_state_store - Store the state (enabled or disabled) 3110 * of whether luns will be enabled or disabled 3111 * for Optimized Access Storage (OAS) operations. 3112 * @dev: class device that is converted into a Scsi_host. 3113 * @attr: device attribute, not used. 3114 * @buf: buffer for passing information. 3115 * @count: Size of the data buffer. 
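 * Only the values 0 (disable) and 1 (enable) are accepted.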
3116 * 3117 * Returns: 3118 * -EINVAL count is invalid, invalid wwpn byte invalid 3119 * -EPERM oas is not supported by hba 3120 * value of count on success 3121 **/ 3122 static ssize_t 3123 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, 3124 const char *buf, size_t count) 3125 { 3126 struct Scsi_Host *shost = class_to_shost(dev); 3127 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3128 int val = 0; 3129 3130 if (!phba->cfg_fof) 3131 return -EPERM; 3132 3133 if (!isdigit(buf[0])) 3134 return -EINVAL; 3135 3136 if (sscanf(buf, "%i", &val) != 1) 3137 return -EINVAL; 3138 3139 if ((val != 0) && (val != 1)) 3140 return -EINVAL; 3141 3142 phba->cfg_oas_lun_state = val; 3143 return strlen(buf); 3144 } 3145 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, 3146 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); 3147 3148 /** 3149 * lpfc_oas_lun_status_show - Return the status of the Optimized Access 3150 * Storage (OAS) lun returned by the 3151 * lpfc_oas_lun_show function. 3152 * @dev: class device that is converted into a Scsi_host. 3153 * @attr: device attribute, not used. 3154 * @buf: buffer for passing information. 3155 * 3156 * Returns: 3157 * size of formatted string. 3158 **/ 3159 static ssize_t 3160 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, 3161 char *buf) 3162 { 3163 struct Scsi_Host *shost = class_to_shost(dev); 3164 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3165 3166 if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) 3167 return -EFAULT; 3168 3169 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); 3170 } 3171 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, 3172 lpfc_oas_lun_status_show, NULL); 3173 3174 3175 /** 3176 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage 3177 * (OAS) operations. 3178 * @phba: lpfc_hba pointer. 3179 * @vpt_wwpn: wwpn of the vport associated with the returned lun 3180 * @tgt_wwpn: wwpn of the target associated with the returned lun 3181 * @lun: the fc lun for setting oas state. 3182 * @oas_state: the oas state to be set to the lun. 3183 * @pri: priority 3184 * 3185 * Returns: 3186 * SUCCESS : 0 3187 * -EPERM OAS is not enabled or not supported by this port. 3188 * 3189 */ 3190 static size_t 3191 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3192 uint8_t tgt_wwpn[], uint64_t lun, 3193 uint32_t oas_state, uint8_t pri) 3194 { 3195 3196 int rc = 0; 3197 3198 if (!phba->cfg_fof) 3199 return -EPERM; 3200 3201 if (oas_state) { 3202 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3203 (struct lpfc_name *)tgt_wwpn, 3204 lun, pri)) 3205 rc = -ENOMEM; 3206 } else { 3207 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3208 (struct lpfc_name *)tgt_wwpn, lun, pri); 3209 } 3210 return rc; 3211 3212 } 3213 3214 /** 3215 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized 3216 * Access Storage (OAS) operations. 3217 * @phba: lpfc_hba pointer. 3218 * @vpt_wwpn: wwpn of the vport associated with the returned lun 3219 * @tgt_wwpn: wwpn of the target associated with the returned lun 3220 * @lun_status: status of the lun returned lun 3221 * @lun_pri: priority of the lun returned lun 3222 * 3223 * Returns the first or next lun enabled for OAS operations for the vport/target 3224 * specified. If a lun is found, its vport wwpn, target wwpn and status is 3225 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. 
3226 * 3227 * Return: 3228 * lun that is OAS enabled for the vport/target 3229 * NOT_OAS_ENABLED_LUN when no oas enabled lun found. 3230 */ 3231 static uint64_t 3232 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3233 uint8_t tgt_wwpn[], uint32_t *lun_status, 3234 uint32_t *lun_pri) 3235 { 3236 uint64_t found_lun; 3237 3238 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) 3239 return NOT_OAS_ENABLED_LUN; 3240 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) 3241 phba->sli4_hba.oas_next_vpt_wwpn, 3242 (struct lpfc_name *) 3243 phba->sli4_hba.oas_next_tgt_wwpn, 3244 &phba->sli4_hba.oas_next_lun, 3245 (struct lpfc_name *)vpt_wwpn, 3246 (struct lpfc_name *)tgt_wwpn, 3247 &found_lun, lun_status, lun_pri)) 3248 return found_lun; 3249 else 3250 return NOT_OAS_ENABLED_LUN; 3251 } 3252 3253 /** 3254 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations 3255 * @phba: lpfc_hba pointer. 3256 * @vpt_wwpn: vport wwpn by reference. 3257 * @tgt_wwpn: target wwpn by reference. 3258 * @lun: the fc lun for setting oas state. 3259 * @oas_state: the oas state to be set to the oas_lun. 3260 * @pri: priority 3261 * 3262 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) 3263 * a lun for OAS operations. 3264 * 3265 * Return: 3266 * SUCCESS: 0 3267 * -ENOMEM: failed to enable an lun for OAS operations 3268 * -EPERM: OAS is not enabled 3269 */ 3270 static ssize_t 3271 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3272 uint8_t tgt_wwpn[], uint64_t lun, 3273 uint32_t oas_state, uint8_t pri) 3274 { 3275 3276 int rc; 3277 3278 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, 3279 oas_state, pri); 3280 return rc; 3281 } 3282 3283 /** 3284 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target 3285 * @dev: class device that is converted into a Scsi_host. 3286 * @attr: device attribute, not used. 3287 * @buf: buffer for passing information. 3288 * 3289 * This routine returns a lun enabled for OAS each time the function 3290 * is called. 3291 * 3292 * Returns: 3293 * SUCCESS: size of formatted string. 3294 * -EFAULT: target or vport wwpn was not set properly. 3295 * -EPERM: oas is not enabled. 3296 **/ 3297 static ssize_t 3298 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, 3299 char *buf) 3300 { 3301 struct Scsi_Host *shost = class_to_shost(dev); 3302 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3303 3304 uint64_t oas_lun; 3305 int len = 0; 3306 3307 if (!phba->cfg_fof) 3308 return -EPERM; 3309 3310 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3311 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) 3312 return -EFAULT; 3313 3314 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3315 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) 3316 return -EFAULT; 3317 3318 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, 3319 phba->cfg_oas_tgt_wwpn, 3320 &phba->cfg_oas_lun_status, 3321 &phba->cfg_oas_priority); 3322 if (oas_lun != NOT_OAS_ENABLED_LUN) 3323 phba->cfg_oas_flags |= OAS_LUN_VALID; 3324 3325 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); 3326 3327 return len; 3328 } 3329 3330 /** 3331 * lpfc_oas_lun_store - Sets the OAS state for lun 3332 * @dev: class device that is converted into a Scsi_host. 3333 * @attr: device attribute, not used. 3334 * @buf: buffer for passing information. 3335 * @count: size of the formatting string 3336 * 3337 * This function sets the OAS state for lun. 
Before this function is called, 3338 * the vport wwpn, target wwpn, and oas state need to be set. 3339 * 3340 * Returns: 3341 * SUCCESS: size of formatted string. 3342 * -EFAULT: target or vport wwpn was not set properly. 3343 * -EPERM: oas is not enabled. 3344 * size of formatted string. 3345 **/ 3346 static ssize_t 3347 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, 3348 const char *buf, size_t count) 3349 { 3350 struct Scsi_Host *shost = class_to_shost(dev); 3351 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3352 uint64_t scsi_lun; 3353 uint32_t pri; 3354 ssize_t rc; 3355 3356 if (!phba->cfg_fof) 3357 return -EPERM; 3358 3359 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3360 return -EFAULT; 3361 3362 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3363 return -EFAULT; 3364 3365 if (!isdigit(buf[0])) 3366 return -EINVAL; 3367 3368 if (sscanf(buf, "0x%llx", &scsi_lun) != 1) 3369 return -EINVAL; 3370 3371 pri = phba->cfg_oas_priority; 3372 if (pri == 0) 3373 pri = phba->cfg_XLanePriority; 3374 3375 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3376 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " 3377 "priority 0x%x with oas state %d\n", 3378 wwn_to_u64(phba->cfg_oas_vpt_wwpn), 3379 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, 3380 pri, phba->cfg_oas_lun_state); 3381 3382 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, 3383 phba->cfg_oas_tgt_wwpn, scsi_lun, 3384 phba->cfg_oas_lun_state, pri); 3385 if (rc) 3386 return rc; 3387 3388 return count; 3389 } 3390 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, 3391 lpfc_oas_lun_show, lpfc_oas_lun_store); 3392 3393 int lpfc_enable_nvmet_cnt; 3394 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { 3395 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3396 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3397 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); 3398 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); 3399 3400 static int lpfc_poll = 0; 3401 module_param(lpfc_poll, int, S_IRUGO); 3402 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" 3403 " 0 - none," 3404 " 1 - poll with interrupts enabled" 3405 " 3 - poll and disable FCP ring interrupts"); 3406 3407 static DEVICE_ATTR_RW(lpfc_poll); 3408 3409 int lpfc_no_hba_reset_cnt; 3410 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { 3411 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3412 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); 3413 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); 3414 3415 LPFC_ATTR(sli_mode, 3, 3, 3, 3416 "SLI mode selector: 3 - select SLI-3"); 3417 3418 LPFC_ATTR_R(enable_npiv, 1, 0, 1, 3419 "Enable NPIV functionality"); 3420 3421 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, 3422 "FCF Fast failover=1 Priority failover=2"); 3423 3424 /* 3425 * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of 3426 * aborted IO. 3427 * The range is [0,1]. Default value is 0 3428 * 0, IO completes after ABTS issued (default). 3429 * 1, IO completes after receipt of ABTS response or timeout. 3430 */ 3431 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion"); 3432 3433 /* 3434 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures 3435 # 0x0 = disabled, XRI/OXID use not tracked. 3436 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. 3437 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. 
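# Default value is 2.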
3438 */ 3439 LPFC_ATTR_R(enable_rrq, 2, 0, 2, 3440 "Enable RRQ functionality"); 3441 3442 /* 3443 # lpfc_suppress_link_up: Bring link up at initialization 3444 # 0x0 = bring link up (issue MBX_INIT_LINK) 3445 # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) 3446 # 0x2 = never bring up link 3447 # Default value is 0. 3448 */ 3449 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 3450 LPFC_DELAY_INIT_LINK_INDEFINITELY, 3451 "Suppress Link Up at initialization"); 3452 3453 static ssize_t 3454 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) 3455 { 3456 struct Scsi_Host *shost = class_to_shost(dev); 3457 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3458 3459 return scnprintf(buf, PAGE_SIZE, "%d\n", 3460 phba->sli4_hba.pc_sli4_params.pls); 3461 } 3462 static DEVICE_ATTR(pls, 0444, 3463 lpfc_pls_show, NULL); 3464 3465 static ssize_t 3466 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) 3467 { 3468 struct Scsi_Host *shost = class_to_shost(dev); 3469 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3470 3471 return scnprintf(buf, PAGE_SIZE, "%d\n", 3472 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); 3473 } 3474 static DEVICE_ATTR(pt, 0444, 3475 lpfc_pt_show, NULL); 3476 3477 /* 3478 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS 3479 # 1 - (1024) 3480 # 2 - (2048) 3481 # 3 - (3072) 3482 # 4 - (4096) 3483 # 5 - (5120) 3484 */ 3485 static ssize_t 3486 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3487 { 3488 struct Scsi_Host *shost = class_to_shost(dev); 3489 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3490 3491 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); 3492 } 3493 3494 static DEVICE_ATTR(iocb_hw, S_IRUGO, 3495 lpfc_iocb_hw_show, NULL); 3496 static ssize_t 3497 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3498 { 3499 struct Scsi_Host *shost = class_to_shost(dev); 3500 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3501 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3502 3503 return scnprintf(buf, PAGE_SIZE, "%d\n", 3504 pring ? pring->txq_max : 0); 3505 } 3506 3507 static DEVICE_ATTR(txq_hw, S_IRUGO, 3508 lpfc_txq_hw_show, NULL); 3509 static ssize_t 3510 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, 3511 char *buf) 3512 { 3513 struct Scsi_Host *shost = class_to_shost(dev); 3514 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3515 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3516 3517 return scnprintf(buf, PAGE_SIZE, "%d\n", 3518 pring ? pring->txcmplq_max : 0); 3519 } 3520 3521 static DEVICE_ATTR(txcmplq_hw, S_IRUGO, 3522 lpfc_txcmplq_hw_show, NULL); 3523 3524 /* 3525 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 3526 # until the timer expires. Value range is [0,255]. Default value is 30. 3527 */ 3528 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3529 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; 3530 module_param(lpfc_nodev_tmo, int, 0); 3531 MODULE_PARM_DESC(lpfc_nodev_tmo, 3532 "Seconds driver will hold I/O waiting " 3533 "for a device to come back"); 3534 3535 /** 3536 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value 3537 * @dev: class converted to a Scsi_host structure. 3538 * @attr: device attribute, not used. 3539 * @buf: on return contains the dev loss timeout in decimal. 
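 * Note: nodev_tmo and devloss_tmo track the same underlying value, so
 * this reports vport->cfg_devloss_tmo.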
3540 * 3541 * Returns: size of formatted string. 3542 **/ 3543 static ssize_t 3544 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, 3545 char *buf) 3546 { 3547 struct Scsi_Host *shost = class_to_shost(dev); 3548 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3549 3550 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 3551 } 3552 3553 /** 3554 * lpfc_nodev_tmo_init - Set the hba nodev timeout value 3555 * @vport: lpfc vport structure pointer. 3556 * @val: contains the nodev timeout value. 3557 * 3558 * Description: 3559 * If the devloss tmo is already set then nodev tmo is set to devloss tmo, 3560 * a kernel error message is printed and zero is returned. 3561 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3562 * Otherwise nodev tmo is set to the default value. 3563 * 3564 * Returns: 3565 * zero if already set or if val is in range 3566 * -EINVAL val out of range 3567 **/ 3568 static int 3569 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) 3570 { 3571 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { 3572 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; 3573 if (val != LPFC_DEF_DEVLOSS_TMO) 3574 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3575 "0407 Ignoring lpfc_nodev_tmo module " 3576 "parameter because lpfc_devloss_tmo " 3577 "is set.\n"); 3578 return 0; 3579 } 3580 3581 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3582 vport->cfg_nodev_tmo = val; 3583 vport->cfg_devloss_tmo = val; 3584 return 0; 3585 } 3586 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3587 "0400 lpfc_nodev_tmo attribute cannot be set to" 3588 " %d, allowed range is [%d, %d]\n", 3589 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3590 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3591 return -EINVAL; 3592 } 3593 3594 /** 3595 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value 3596 * @vport: lpfc vport structure pointer. 3597 * 3598 * Description: 3599 * Update all the ndlp's dev loss tmo with the vport devloss tmo value. 3600 **/ 3601 static void 3602 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) 3603 { 3604 struct Scsi_Host *shost; 3605 struct lpfc_nodelist *ndlp; 3606 #if (IS_ENABLED(CONFIG_NVME_FC)) 3607 struct lpfc_nvme_rport *rport; 3608 struct nvme_fc_remote_port *remoteport = NULL; 3609 #endif 3610 3611 shost = lpfc_shost_from_vport(vport); 3612 spin_lock_irq(shost->host_lock); 3613 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3614 if (ndlp->rport) 3615 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3616 #if (IS_ENABLED(CONFIG_NVME_FC)) 3617 spin_lock(&ndlp->lock); 3618 rport = lpfc_ndlp_get_nrport(ndlp); 3619 if (rport) 3620 remoteport = rport->remoteport; 3621 spin_unlock(&ndlp->lock); 3622 if (rport && remoteport) 3623 nvme_fc_set_remoteport_devloss(remoteport, 3624 vport->cfg_devloss_tmo); 3625 #endif 3626 } 3627 spin_unlock_irq(shost->host_lock); 3628 } 3629 3630 /** 3631 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values 3632 * @vport: lpfc vport structure pointer. 3633 * @val: contains the tmo value. 3634 * 3635 * Description: 3636 * If the devloss tmo is already set or the vport dev loss tmo has changed 3637 * then a kernel error message is printed and zero is returned. 3638 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3639 * Otherwise nodev tmo is set to the default value. 
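 * On success the fc_host dev_loss_tmo is also updated and the new value
 * is propagated to all existing remote ports.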
3640 * 3641 * Returns: 3642 * zero if already set or if val is in range 3643 * -EINVAL val out of range 3644 **/ 3645 static int 3646 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) 3647 { 3648 if (vport->dev_loss_tmo_changed || 3649 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { 3650 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3651 "0401 Ignoring change to lpfc_nodev_tmo " 3652 "because lpfc_devloss_tmo is set.\n"); 3653 return 0; 3654 } 3655 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3656 vport->cfg_nodev_tmo = val; 3657 vport->cfg_devloss_tmo = val; 3658 /* 3659 * For compat: set the fc_host dev loss so new rports 3660 * will get the value. 3661 */ 3662 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; 3663 lpfc_update_rport_devloss_tmo(vport); 3664 return 0; 3665 } 3666 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3667 "0403 lpfc_nodev_tmo attribute cannot be set to " 3668 "%d, allowed range is [%d, %d]\n", 3669 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3670 return -EINVAL; 3671 } 3672 3673 lpfc_vport_param_store(nodev_tmo) 3674 3675 static DEVICE_ATTR_RW(lpfc_nodev_tmo); 3676 3677 /* 3678 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that 3679 # disappear until the timer expires. Value range is [0,255]. Default 3680 # value is 30. 3681 */ 3682 module_param(lpfc_devloss_tmo, int, S_IRUGO); 3683 MODULE_PARM_DESC(lpfc_devloss_tmo, 3684 "Seconds driver will hold I/O waiting " 3685 "for a device to come back"); 3686 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, 3687 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) 3688 lpfc_vport_param_show(devloss_tmo) 3689 3690 /** 3691 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit 3692 * @vport: lpfc vport structure pointer. 3693 * @val: contains the tmo value. 3694 * 3695 * Description: 3696 * If val is in a valid range then set the vport nodev tmo, 3697 * devloss tmo, also set the vport dev loss tmo changed flag. 3698 * Else a kernel error message is printed. 
3699 *
3700 * Returns:
3701 * zero if val is in range
3702 * -EINVAL val out of range
3703 **/
3704 static int
3705 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3706 {
3707 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3708 vport->cfg_nodev_tmo = val;
3709 vport->cfg_devloss_tmo = val;
3710 vport->dev_loss_tmo_changed = 1;
3711 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3712 lpfc_update_rport_devloss_tmo(vport);
3713 return 0;
3714 }
3715 
3716 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3717 "0404 lpfc_devloss_tmo attribute cannot be set to "
3718 "%d, allowed range is [%d, %d]\n",
3719 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3720 return -EINVAL;
3721 }
3722 
3723 lpfc_vport_param_store(devloss_tmo)
3724 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3725 
3726 /*
3727 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3728 * lpfc_suppress_rsp = 0 Disable
3729 * lpfc_suppress_rsp = 1 Enable (default)
3730 *
3731 */
3732 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3733 "Enable suppress rsp feature if firmware supports it");
3734 
3735 /*
3736 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3737 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3738 * lpfc_nvmet_mrq = 1 use a single RQ pair
3739 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3740 *
3741 */
3742 LPFC_ATTR_R(nvmet_mrq,
3743 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3744 "Specify number of RQ pairs for processing NVMET cmds");
3745 
3746 /*
3747 * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
3748 * to each NVMET RQ. Range 64 to 2048, default is 512.
3749 */
3750 LPFC_ATTR_R(nvmet_mrq_post,
3751 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3752 LPFC_NVMET_RQE_DEF_COUNT,
3753 "Specify number of RQ buffers to initially post");
3754 
3755 /*
3756 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3757 * Supported Values: 1 - register just FCP
3758 * 3 - register both FCP and NVME
3759 * Supported values are [1,3]. Default value is 3
3760 */
3761 LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
3762 LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
3763 "Enable FC4 Protocol support - FCP / NVME");
3764 
3765 /*
3766 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3767 # deluged with LOTS of information.
3768 # You can set a bit mask to record specific types of verbose messages:
3769 # See lpfc_logmsg.h for definitions.
3770 */
3771 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3772 "Verbose logging bit-mask");
3773 
3774 /*
3775 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3776 # objects that have been registered with the nameserver after login.
3777 */
3778 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3779 "Deregister nameserver objects before LOGO");
3780 
3781 /*
3782 # lun_queue_depth: This parameter is used to limit the number of outstanding
3783 # commands per FCP LUN.
3784 */
3785 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3786 "Max number of FCP commands we can queue to a specific LUN");
3787 
3788 /*
3789 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3790 # commands per target port. Value range is [10,65535]. Default value is 65535.
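#
# For example (128 is an illustrative value within the allowed range, and
# host0 is a hypothetical host instance), the limit can be set at module
# load time with:
#   modprobe lpfc lpfc_tgt_queue_depth=128
# or changed at runtime through the per-host sysfs attribute:
#   echo 128 > /sys/class/scsi_host/host0/lpfc_tgt_queue_depth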
3791 */ 3792 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; 3793 module_param(lpfc_tgt_queue_depth, uint, 0444); 3794 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); 3795 lpfc_vport_param_show(tgt_queue_depth); 3796 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, 3797 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH); 3798 3799 /** 3800 * lpfc_tgt_queue_depth_set: Sets an attribute value. 3801 * @vport: lpfc vport structure pointer. 3802 * @val: integer attribute value. 3803 * 3804 * Description: Sets the parameter to the new value. 3805 * 3806 * Returns: 3807 * zero on success 3808 * -EINVAL if val is invalid 3809 */ 3810 static int 3811 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val) 3812 { 3813 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3814 struct lpfc_nodelist *ndlp; 3815 3816 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH)) 3817 return -EINVAL; 3818 3819 if (val == vport->cfg_tgt_queue_depth) 3820 return 0; 3821 3822 spin_lock_irq(shost->host_lock); 3823 vport->cfg_tgt_queue_depth = val; 3824 3825 /* Next loop thru nodelist and change cmd_qdepth */ 3826 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 3827 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 3828 3829 spin_unlock_irq(shost->host_lock); 3830 return 0; 3831 } 3832 3833 lpfc_vport_param_store(tgt_queue_depth); 3834 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth); 3835 3836 /* 3837 # hba_queue_depth: This parameter is used to limit the number of outstanding 3838 # commands per lpfc HBA. Value range is [32,8192]. If this parameter 3839 # value is greater than the maximum number of exchanges supported by the HBA, 3840 # then maximum number of exchanges supported by the HBA is used to determine 3841 # the hba_queue_depth. 3842 */ 3843 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, 3844 "Max number of FCP commands we can queue to a lpfc HBA"); 3845 3846 /* 3847 # peer_port_login: This parameter allows/prevents logins 3848 # between peer ports hosted on the same physical port. 3849 # When this parameter is set 0 peer ports of same physical port 3850 # are not allowed to login to each other. 3851 # When this parameter is set 1 peer ports of same physical port 3852 # are allowed to login to each other. 3853 # Default value of this parameter is 0. 3854 */ 3855 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, 3856 "Allow peer ports on the same physical port to login to each " 3857 "other."); 3858 3859 /* 3860 # restrict_login: This parameter allows/prevents logins 3861 # between Virtual Ports and remote initiators. 3862 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from 3863 # other initiators and will attempt to PLOGI all remote ports. 3864 # When this parameter is set (1) Virtual Ports will reject PLOGIs from 3865 # remote ports and will not attempt to PLOGI to other initiators. 3866 # This parameter does not restrict to the physical port. 3867 # This parameter does not restrict logins to Fabric resident remote ports. 3868 # Default value of this parameter is 1. 3869 */ 3870 static int lpfc_restrict_login = 1; 3871 module_param(lpfc_restrict_login, int, S_IRUGO); 3872 MODULE_PARM_DESC(lpfc_restrict_login, 3873 "Restrict virtual ports login to remote initiators."); 3874 lpfc_vport_param_show(restrict_login); 3875 3876 /** 3877 * lpfc_restrict_login_init - Set the vport restrict login flag 3878 * @vport: lpfc vport structure pointer. 3879 * @val: contains the restrict login value. 
3880 * 3881 * Description: 3882 * If val is not in a valid range then log a kernel error message and set 3883 * the vport restrict login to one. 3884 * If the port type is physical clear the restrict login flag and return. 3885 * Else set the restrict login flag to val. 3886 * 3887 * Returns: 3888 * zero if val is in range 3889 * -EINVAL val out of range 3890 **/ 3891 static int 3892 lpfc_restrict_login_init(struct lpfc_vport *vport, int val) 3893 { 3894 if (val < 0 || val > 1) { 3895 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3896 "0422 lpfc_restrict_login attribute cannot " 3897 "be set to %d, allowed range is [0, 1]\n", 3898 val); 3899 vport->cfg_restrict_login = 1; 3900 return -EINVAL; 3901 } 3902 if (vport->port_type == LPFC_PHYSICAL_PORT) { 3903 vport->cfg_restrict_login = 0; 3904 return 0; 3905 } 3906 vport->cfg_restrict_login = val; 3907 return 0; 3908 } 3909 3910 /** 3911 * lpfc_restrict_login_set - Set the vport restrict login flag 3912 * @vport: lpfc vport structure pointer. 3913 * @val: contains the restrict login value. 3914 * 3915 * Description: 3916 * If val is not in a valid range then log a kernel error message and set 3917 * the vport restrict login to one. 3918 * If the port type is physical and the val is not zero log a kernel 3919 * error message, clear the restrict login flag and return zero. 3920 * Else set the restrict login flag to val. 3921 * 3922 * Returns: 3923 * zero if val is in range 3924 * -EINVAL val out of range 3925 **/ 3926 static int 3927 lpfc_restrict_login_set(struct lpfc_vport *vport, int val) 3928 { 3929 if (val < 0 || val > 1) { 3930 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3931 "0425 lpfc_restrict_login attribute cannot " 3932 "be set to %d, allowed range is [0, 1]\n", 3933 val); 3934 vport->cfg_restrict_login = 1; 3935 return -EINVAL; 3936 } 3937 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { 3938 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3939 "0468 lpfc_restrict_login must be 0 for " 3940 "Physical ports.\n"); 3941 vport->cfg_restrict_login = 0; 3942 return 0; 3943 } 3944 vport->cfg_restrict_login = val; 3945 return 0; 3946 } 3947 lpfc_vport_param_store(restrict_login); 3948 static DEVICE_ATTR_RW(lpfc_restrict_login); 3949 3950 /* 3951 # Some disk devices have a "select ID" or "select Target" capability. 3952 # From a protocol standpoint "select ID" usually means select the 3953 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative 3954 # annex" which contains a table that maps a "select ID" (a number 3955 # between 0 and 7F) to an ALPA. By default, for compatibility with 3956 # older drivers, the lpfc driver scans this table from low ALPA to high 3957 # ALPA. 3958 # 3959 # Turning on the scan-down variable (on = 1, off = 0) will 3960 # cause the lpfc driver to use an inverted table, effectively 3961 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. 3962 # 3963 # (Note: This "select ID" functionality is a LOOP ONLY characteristic 3964 # and will not work across a fabric. Also this parameter will take 3965 # effect only in the case when ALPA map is not available.) 
3966 */ 3967 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, 3968 "Start scanning for devices from highest ALPA to lowest"); 3969 3970 /* 3971 # lpfc_topology: link topology for init link 3972 # 0x0 = attempt loop mode then point-to-point 3973 # 0x01 = internal loopback mode 3974 # 0x02 = attempt point-to-point mode only 3975 # 0x04 = attempt loop mode only 3976 # 0x06 = attempt point-to-point mode then loop 3977 # Set point-to-point mode if you want to run as an N_Port. 3978 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. 3979 # Default value is 0. 3980 */ 3981 LPFC_ATTR(topology, 0, 0, 6, 3982 "Select Fibre Channel topology"); 3983 3984 /** 3985 * lpfc_topology_store - Set the adapters topology field 3986 * @dev: class device that is converted into a scsi_host. 3987 * @attr:device attribute, not used. 3988 * @buf: buffer for passing information. 3989 * @count: size of the data buffer. 3990 * 3991 * Description: 3992 * If val is in a valid range then set the adapter's topology field and 3993 * issue a lip; if the lip fails reset the topology to the old value. 3994 * 3995 * If the value is not in range log a kernel error message and return an error. 3996 * 3997 * Returns: 3998 * zero if val is in range and lip okay 3999 * non-zero return value from lpfc_issue_lip() 4000 * -EINVAL val out of range 4001 **/ 4002 static ssize_t 4003 lpfc_topology_store(struct device *dev, struct device_attribute *attr, 4004 const char *buf, size_t count) 4005 { 4006 struct Scsi_Host *shost = class_to_shost(dev); 4007 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4008 struct lpfc_hba *phba = vport->phba; 4009 int val = 0; 4010 int nolip = 0; 4011 const char *val_buf = buf; 4012 int err; 4013 uint32_t prev_val; 4014 u8 sli_family, if_type; 4015 4016 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4017 nolip = 1; 4018 val_buf = &buf[strlen("nolip ")]; 4019 } 4020 4021 if (!isdigit(val_buf[0])) 4022 return -EINVAL; 4023 if (sscanf(val_buf, "%i", &val) != 1) 4024 return -EINVAL; 4025 4026 if (val >= 0 && val <= 6) { 4027 prev_val = phba->cfg_topology; 4028 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G && 4029 val == 4) { 4030 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4031 "3113 Loop mode not supported at speed %d\n", 4032 val); 4033 return -EINVAL; 4034 } 4035 /* 4036 * The 'topology' is not a configurable parameter if : 4037 * - persistent topology enabled 4038 * - ASIC_GEN_NUM >= 0xC, with no private loop support 4039 */ 4040 sli_family = bf_get(lpfc_sli_intf_sli_family, 4041 &phba->sli4_hba.sli_intf); 4042 if_type = bf_get(lpfc_sli_intf_if_type, 4043 &phba->sli4_hba.sli_intf); 4044 if ((phba->hba_flag & HBA_PERSISTENT_TOPO || 4045 (!phba->sli4_hba.pc_sli4_params.pls && 4046 (sli_family == LPFC_SLI_INTF_FAMILY_G6 || 4047 if_type == LPFC_SLI_INTF_IF_TYPE_6))) && 4048 val == 4) { 4049 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4050 "3114 Loop mode not supported\n"); 4051 return -EINVAL; 4052 } 4053 phba->cfg_topology = val; 4054 if (nolip) 4055 return strlen(buf); 4056 4057 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4058 "3054 lpfc_topology changed from %d to %d\n", 4059 prev_val, val); 4060 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) 4061 phba->fc_topology_changed = 1; 4062 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4063 if (err) { 4064 phba->cfg_topology = prev_val; 4065 return -EINVAL; 4066 } else 4067 return strlen(buf); 4068 } 4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4070 "%d:0467 lpfc_topology attribute cannot be set to %d, " 
4071 "allowed range is [0, 6]\n", 4072 phba->brd_no, val); 4073 return -EINVAL; 4074 } 4075 4076 lpfc_param_show(topology) 4077 static DEVICE_ATTR_RW(lpfc_topology); 4078 4079 /** 4080 * lpfc_static_vport_show: Read callback function for 4081 * lpfc_static_vport sysfs file. 4082 * @dev: Pointer to class device object. 4083 * @attr: device attribute structure. 4084 * @buf: Data buffer. 4085 * 4086 * This function is the read call back function for 4087 * lpfc_static_vport sysfs file. The lpfc_static_vport 4088 * sysfs file report the mageability of the vport. 4089 **/ 4090 static ssize_t 4091 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, 4092 char *buf) 4093 { 4094 struct Scsi_Host *shost = class_to_shost(dev); 4095 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4096 if (vport->vport_flag & STATIC_VPORT) 4097 sprintf(buf, "1\n"); 4098 else 4099 sprintf(buf, "0\n"); 4100 4101 return strlen(buf); 4102 } 4103 4104 /* 4105 * Sysfs attribute to control the statistical data collection. 4106 */ 4107 static DEVICE_ATTR_RO(lpfc_static_vport); 4108 4109 /** 4110 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 4111 * @dev: Pointer to class device. 4112 * @attr: Unused. 4113 * @buf: Data buffer. 4114 * @count: Size of the data buffer. 4115 * 4116 * This function get called when a user write to the lpfc_stat_data_ctrl 4117 * sysfs file. This function parse the command written to the sysfs file 4118 * and take appropriate action. These commands are used for controlling 4119 * driver statistical data collection. 4120 * Following are the command this function handles. 4121 * 4122 * setbucket <bucket_type> <base> <step> 4123 * = Set the latency buckets. 4124 * destroybucket = destroy all the buckets. 
4125 * start = start data collection 4126 * stop = stop data collection 4127 * reset = reset the collected data 4128 **/ 4129 static ssize_t 4130 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, 4131 const char *buf, size_t count) 4132 { 4133 struct Scsi_Host *shost = class_to_shost(dev); 4134 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4135 struct lpfc_hba *phba = vport->phba; 4136 #define LPFC_MAX_DATA_CTRL_LEN 1024 4137 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN]; 4138 unsigned long i; 4139 char *str_ptr, *token; 4140 struct lpfc_vport **vports; 4141 struct Scsi_Host *v_shost; 4142 char *bucket_type_str, *base_str, *step_str; 4143 unsigned long base, step, bucket_type; 4144 4145 if (!strncmp(buf, "setbucket", strlen("setbucket"))) { 4146 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1)) 4147 return -EINVAL; 4148 4149 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN); 4150 str_ptr = &bucket_data[0]; 4151 /* Ignore this token - this is command token */ 4152 token = strsep(&str_ptr, "\t "); 4153 if (!token) 4154 return -EINVAL; 4155 4156 bucket_type_str = strsep(&str_ptr, "\t "); 4157 if (!bucket_type_str) 4158 return -EINVAL; 4159 4160 if (!strncmp(bucket_type_str, "linear", strlen("linear"))) 4161 bucket_type = LPFC_LINEAR_BUCKET; 4162 else if (!strncmp(bucket_type_str, "power2", strlen("power2"))) 4163 bucket_type = LPFC_POWER2_BUCKET; 4164 else 4165 return -EINVAL; 4166 4167 base_str = strsep(&str_ptr, "\t "); 4168 if (!base_str) 4169 return -EINVAL; 4170 base = simple_strtoul(base_str, NULL, 0); 4171 4172 step_str = strsep(&str_ptr, "\t "); 4173 if (!step_str) 4174 return -EINVAL; 4175 step = simple_strtoul(step_str, NULL, 0); 4176 if (!step) 4177 return -EINVAL; 4178 4179 /* Block the data collection for every vport */ 4180 vports = lpfc_create_vport_work_array(phba); 4181 if (vports == NULL) 4182 return -ENOMEM; 4183 4184 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4185 v_shost = lpfc_shost_from_vport(vports[i]); 4186 spin_lock_irq(v_shost->host_lock); 4187 /* Block and reset data collection */ 4188 vports[i]->stat_data_blocked = 1; 4189 if (vports[i]->stat_data_enabled) 4190 lpfc_vport_reset_stat_data(vports[i]); 4191 spin_unlock_irq(v_shost->host_lock); 4192 } 4193 4194 /* Set the bucket attributes */ 4195 phba->bucket_type = bucket_type; 4196 phba->bucket_base = base; 4197 phba->bucket_step = step; 4198 4199 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4200 v_shost = lpfc_shost_from_vport(vports[i]); 4201 4202 /* Unblock data collection */ 4203 spin_lock_irq(v_shost->host_lock); 4204 vports[i]->stat_data_blocked = 0; 4205 spin_unlock_irq(v_shost->host_lock); 4206 } 4207 lpfc_destroy_vport_work_array(phba, vports); 4208 return strlen(buf); 4209 } 4210 4211 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) { 4212 vports = lpfc_create_vport_work_array(phba); 4213 if (vports == NULL) 4214 return -ENOMEM; 4215 4216 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4217 v_shost = lpfc_shost_from_vport(vports[i]); 4218 spin_lock_irq(shost->host_lock); 4219 vports[i]->stat_data_blocked = 1; 4220 lpfc_free_bucket(vport); 4221 vport->stat_data_enabled = 0; 4222 vports[i]->stat_data_blocked = 0; 4223 spin_unlock_irq(shost->host_lock); 4224 } 4225 lpfc_destroy_vport_work_array(phba, vports); 4226 phba->bucket_type = LPFC_NO_BUCKET; 4227 phba->bucket_base = 0; 4228 phba->bucket_step = 0; 4229 return strlen(buf); 4230 } 4231 4232 if (!strncmp(buf, "start", strlen("start"))) { 4233 
/* If no buckets configured return error */ 4234 if (phba->bucket_type == LPFC_NO_BUCKET) 4235 return -EINVAL; 4236 spin_lock_irq(shost->host_lock); 4237 if (vport->stat_data_enabled) { 4238 spin_unlock_irq(shost->host_lock); 4239 return strlen(buf); 4240 } 4241 lpfc_alloc_bucket(vport); 4242 vport->stat_data_enabled = 1; 4243 spin_unlock_irq(shost->host_lock); 4244 return strlen(buf); 4245 } 4246 4247 if (!strncmp(buf, "stop", strlen("stop"))) { 4248 spin_lock_irq(shost->host_lock); 4249 if (vport->stat_data_enabled == 0) { 4250 spin_unlock_irq(shost->host_lock); 4251 return strlen(buf); 4252 } 4253 lpfc_free_bucket(vport); 4254 vport->stat_data_enabled = 0; 4255 spin_unlock_irq(shost->host_lock); 4256 return strlen(buf); 4257 } 4258 4259 if (!strncmp(buf, "reset", strlen("reset"))) { 4260 if ((phba->bucket_type == LPFC_NO_BUCKET) 4261 || !vport->stat_data_enabled) 4262 return strlen(buf); 4263 spin_lock_irq(shost->host_lock); 4264 vport->stat_data_blocked = 1; 4265 lpfc_vport_reset_stat_data(vport); 4266 vport->stat_data_blocked = 0; 4267 spin_unlock_irq(shost->host_lock); 4268 return strlen(buf); 4269 } 4270 return -EINVAL; 4271 } 4272 4273 4274 /** 4275 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file 4276 * @dev: Pointer to class device. 4277 * @attr: Unused. 4278 * @buf: Data buffer. 4279 * 4280 * This function is the read call back function for 4281 * lpfc_stat_data_ctrl sysfs file. This function report the 4282 * current statistical data collection state. 4283 **/ 4284 static ssize_t 4285 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, 4286 char *buf) 4287 { 4288 struct Scsi_Host *shost = class_to_shost(dev); 4289 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4290 struct lpfc_hba *phba = vport->phba; 4291 int index = 0; 4292 int i; 4293 char *bucket_type; 4294 unsigned long bucket_value; 4295 4296 switch (phba->bucket_type) { 4297 case LPFC_LINEAR_BUCKET: 4298 bucket_type = "linear"; 4299 break; 4300 case LPFC_POWER2_BUCKET: 4301 bucket_type = "power2"; 4302 break; 4303 default: 4304 bucket_type = "No Bucket"; 4305 break; 4306 } 4307 4308 sprintf(&buf[index], "Statistical Data enabled :%d, " 4309 "blocked :%d, Bucket type :%s, Bucket base :%d," 4310 " Bucket step :%d\nLatency Ranges :", 4311 vport->stat_data_enabled, vport->stat_data_blocked, 4312 bucket_type, phba->bucket_base, phba->bucket_step); 4313 index = strlen(buf); 4314 if (phba->bucket_type != LPFC_NO_BUCKET) { 4315 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 4316 if (phba->bucket_type == LPFC_LINEAR_BUCKET) 4317 bucket_value = phba->bucket_base + 4318 phba->bucket_step * i; 4319 else 4320 bucket_value = phba->bucket_base + 4321 (1 << i) * phba->bucket_step; 4322 4323 if (index + 10 > PAGE_SIZE) 4324 break; 4325 sprintf(&buf[index], "%08ld ", bucket_value); 4326 index = strlen(buf); 4327 } 4328 } 4329 sprintf(&buf[index], "\n"); 4330 return strlen(buf); 4331 } 4332 4333 /* 4334 * Sysfs attribute to control the statistical data collection. 4335 */ 4336 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl); 4337 4338 /* 4339 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. 4340 */ 4341 4342 /* 4343 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN 4344 * for each target. 
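 *
 * Worked sizing example based on the output format below: each bucket is
 * printed as "%010u," (10 digits plus a comma = 11 bytes), the WWN prefix
 * is 16 hex digits plus ':' (17 bytes), and the trailing newline adds one
 * more byte, giving (NUM_BUCKETS * 11) + 18 bytes per target.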
4345 */ 4346 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18) 4347 #define MAX_STAT_DATA_SIZE_PER_TARGET \ 4348 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT) 4349 4350 4351 /** 4352 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute 4353 * @filp: sysfs file 4354 * @kobj: Pointer to the kernel object 4355 * @bin_attr: Attribute object 4356 * @buf: Buffer pointer 4357 * @off: File offset 4358 * @count: Buffer size 4359 * 4360 * This function is the read call back function for lpfc_drvr_stat_data 4361 * sysfs file. This function export the statistical data to user 4362 * applications. 4363 **/ 4364 static ssize_t 4365 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj, 4366 struct bin_attribute *bin_attr, 4367 char *buf, loff_t off, size_t count) 4368 { 4369 struct device *dev = container_of(kobj, struct device, 4370 kobj); 4371 struct Scsi_Host *shost = class_to_shost(dev); 4372 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4373 struct lpfc_hba *phba = vport->phba; 4374 int i = 0, index = 0; 4375 unsigned long nport_index; 4376 struct lpfc_nodelist *ndlp = NULL; 4377 nport_index = (unsigned long)off / 4378 MAX_STAT_DATA_SIZE_PER_TARGET; 4379 4380 if (!vport->stat_data_enabled || vport->stat_data_blocked 4381 || (phba->bucket_type == LPFC_NO_BUCKET)) 4382 return 0; 4383 4384 spin_lock_irq(shost->host_lock); 4385 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4386 if (!ndlp->lat_data) 4387 continue; 4388 4389 if (nport_index > 0) { 4390 nport_index--; 4391 continue; 4392 } 4393 4394 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET) 4395 > count) 4396 break; 4397 4398 if (!ndlp->lat_data) 4399 continue; 4400 4401 /* Print the WWN */ 4402 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:", 4403 ndlp->nlp_portname.u.wwn[0], 4404 ndlp->nlp_portname.u.wwn[1], 4405 ndlp->nlp_portname.u.wwn[2], 4406 ndlp->nlp_portname.u.wwn[3], 4407 ndlp->nlp_portname.u.wwn[4], 4408 ndlp->nlp_portname.u.wwn[5], 4409 ndlp->nlp_portname.u.wwn[6], 4410 ndlp->nlp_portname.u.wwn[7]); 4411 4412 index = strlen(buf); 4413 4414 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 4415 sprintf(&buf[index], "%010u,", 4416 ndlp->lat_data[i].cmd_count); 4417 index = strlen(buf); 4418 } 4419 sprintf(&buf[index], "\n"); 4420 index = strlen(buf); 4421 } 4422 spin_unlock_irq(shost->host_lock); 4423 return index; 4424 } 4425 4426 static struct bin_attribute sysfs_drvr_stat_data_attr = { 4427 .attr = { 4428 .name = "lpfc_drvr_stat_data", 4429 .mode = S_IRUSR, 4430 }, 4431 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, 4432 .read = sysfs_drvr_stat_data_read, 4433 .write = NULL, 4434 }; 4435 4436 /* 4437 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel 4438 # connection. 4439 # Value range is [0,16]. Default value is 0. 4440 */ 4441 /** 4442 * lpfc_link_speed_store - Set the adapters link speed 4443 * @dev: Pointer to class device. 4444 * @attr: Unused. 4445 * @buf: Data buffer. 4446 * @count: Size of the data buffer. 4447 * 4448 * Description: 4449 * If val is in a valid range then set the adapter's link speed field and 4450 * issue a lip; if the lip fails reset the link speed to the old value. 4451 * 4452 * Notes: 4453 * If the value is not in range log a kernel error message and return an error. 4454 * 4455 * Returns: 4456 * zero if val is in range and lip okay. 
4457 * non-zero return value from lpfc_issue_lip() 4458 * -EINVAL val out of range 4459 **/ 4460 static ssize_t 4461 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, 4462 const char *buf, size_t count) 4463 { 4464 struct Scsi_Host *shost = class_to_shost(dev); 4465 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4466 struct lpfc_hba *phba = vport->phba; 4467 int val = LPFC_USER_LINK_SPEED_AUTO; 4468 int nolip = 0; 4469 const char *val_buf = buf; 4470 int err; 4471 uint32_t prev_val, if_type; 4472 4473 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 4474 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && 4475 phba->hba_flag & HBA_FORCED_LINK_SPEED) 4476 return -EPERM; 4477 4478 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4479 nolip = 1; 4480 val_buf = &buf[strlen("nolip ")]; 4481 } 4482 4483 if (!isdigit(val_buf[0])) 4484 return -EINVAL; 4485 if (sscanf(val_buf, "%i", &val) != 1) 4486 return -EINVAL; 4487 4488 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4489 "3055 lpfc_link_speed changed from %d to %d %s\n", 4490 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); 4491 4492 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 4493 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 4494 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 4495 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || 4496 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || 4497 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || 4498 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || 4499 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { 4500 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4501 "2879 lpfc_link_speed attribute cannot be set " 4502 "to %d. Speed is not supported by this port.\n", 4503 val); 4504 return -EINVAL; 4505 } 4506 if (val >= LPFC_USER_LINK_SPEED_16G && 4507 phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4509 "3112 lpfc_link_speed attribute cannot be set " 4510 "to %d. Speed is not supported in loop mode.\n", 4511 val); 4512 return -EINVAL; 4513 } 4514 4515 switch (val) { 4516 case LPFC_USER_LINK_SPEED_AUTO: 4517 case LPFC_USER_LINK_SPEED_1G: 4518 case LPFC_USER_LINK_SPEED_2G: 4519 case LPFC_USER_LINK_SPEED_4G: 4520 case LPFC_USER_LINK_SPEED_8G: 4521 case LPFC_USER_LINK_SPEED_16G: 4522 case LPFC_USER_LINK_SPEED_32G: 4523 case LPFC_USER_LINK_SPEED_64G: 4524 prev_val = phba->cfg_link_speed; 4525 phba->cfg_link_speed = val; 4526 if (nolip) 4527 return strlen(buf); 4528 4529 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4530 if (err) { 4531 phba->cfg_link_speed = prev_val; 4532 return -EINVAL; 4533 } 4534 return strlen(buf); 4535 default: 4536 break; 4537 } 4538 4539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4540 "0469 lpfc_link_speed attribute cannot be set to %d, " 4541 "allowed values are [%s]\n", 4542 val, LPFC_LINK_SPEED_STRING); 4543 return -EINVAL; 4544 4545 } 4546 4547 static int lpfc_link_speed = 0; 4548 module_param(lpfc_link_speed, int, S_IRUGO); 4549 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); 4550 lpfc_param_show(link_speed) 4551 4552 /** 4553 * lpfc_link_speed_init - Set the adapters link speed 4554 * @phba: lpfc_hba pointer. 4555 * @val: link speed value. 4556 * 4557 * Description: 4558 * If val is in a valid range then set the adapter's link speed field. 
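 *
 * For example, loading the driver with lpfc_link_speed=16 requests a
 * 16 Gb link, assuming (as the [0,16] range note above suggests) that the
 * LPFC_USER_LINK_SPEED_* values encode the speed in Gb; an unsupported
 * value falls back to auto speed negotiation.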
4559 * 4560 * Notes: 4561 * If the value is not in range log a kernel error message, clear the link 4562 * speed and return an error. 4563 * 4564 * Returns: 4565 * zero if val saved. 4566 * -EINVAL val out of range 4567 **/ 4568 static int 4569 lpfc_link_speed_init(struct lpfc_hba *phba, int val) 4570 { 4571 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { 4572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4573 "3111 lpfc_link_speed of %d cannot " 4574 "support loop mode, setting topology to default.\n", 4575 val); 4576 phba->cfg_topology = 0; 4577 } 4578 4579 switch (val) { 4580 case LPFC_USER_LINK_SPEED_AUTO: 4581 case LPFC_USER_LINK_SPEED_1G: 4582 case LPFC_USER_LINK_SPEED_2G: 4583 case LPFC_USER_LINK_SPEED_4G: 4584 case LPFC_USER_LINK_SPEED_8G: 4585 case LPFC_USER_LINK_SPEED_16G: 4586 case LPFC_USER_LINK_SPEED_32G: 4587 case LPFC_USER_LINK_SPEED_64G: 4588 phba->cfg_link_speed = val; 4589 return 0; 4590 default: 4591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4592 "0405 lpfc_link_speed attribute cannot " 4593 "be set to %d, allowed values are " 4594 "["LPFC_LINK_SPEED_STRING"]\n", val); 4595 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 4596 return -EINVAL; 4597 } 4598 } 4599 4600 static DEVICE_ATTR_RW(lpfc_link_speed); 4601 4602 /* 4603 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) 4604 # 0 = aer disabled or not supported 4605 # 1 = aer supported and enabled (default) 4606 # Value range is [0,1]. Default value is 1. 4607 */ 4608 LPFC_ATTR(aer_support, 1, 0, 1, 4609 "Enable PCIe device AER support"); 4610 lpfc_param_show(aer_support) 4611 4612 /** 4613 * lpfc_aer_support_store - Set the adapter for aer support 4614 * 4615 * @dev: class device that is converted into a Scsi_host. 4616 * @attr: device attribute, not used. 4617 * @buf: containing enable or disable aer flag. 4618 * @count: unused variable. 4619 * 4620 * Description: 4621 * If the val is 1 and currently the device's AER capability was not 4622 * enabled, invoke the kernel's enable AER helper routine, trying to 4623 * enable the device's AER capability. If the helper routine enabling 4624 * AER returns success, update the device's cfg_aer_support flag to 4625 * indicate AER is supported by the device; otherwise, if the device 4626 * AER capability is already enabled to support AER, then do nothing. 4627 * 4628 * If the val is 0 and currently the device's AER support was enabled, 4629 * invoke the kernel's disable AER helper routine. After that, update 4630 * the device's cfg_aer_support flag to indicate AER is not supported 4631 * by the device; otherwise, if the device AER capability is already 4632 * disabled from supporting AER, then do nothing. 4633 * 4634 * Returns: 4635 * length of the buf on success if val is in range the intended mode 4636 * is supported. 4637 * -EINVAL if val out of range or intended mode is not supported. 
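 *
 * Example (host0 is a hypothetical host instance):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_support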
4638 **/ 4639 static ssize_t 4640 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, 4641 const char *buf, size_t count) 4642 { 4643 struct Scsi_Host *shost = class_to_shost(dev); 4644 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4645 struct lpfc_hba *phba = vport->phba; 4646 int val = 0, rc = -EINVAL; 4647 4648 if (!isdigit(buf[0])) 4649 return -EINVAL; 4650 if (sscanf(buf, "%i", &val) != 1) 4651 return -EINVAL; 4652 4653 switch (val) { 4654 case 0: 4655 if (phba->hba_flag & HBA_AER_ENABLED) { 4656 rc = pci_disable_pcie_error_reporting(phba->pcidev); 4657 if (!rc) { 4658 spin_lock_irq(&phba->hbalock); 4659 phba->hba_flag &= ~HBA_AER_ENABLED; 4660 spin_unlock_irq(&phba->hbalock); 4661 phba->cfg_aer_support = 0; 4662 rc = strlen(buf); 4663 } else 4664 rc = -EPERM; 4665 } else { 4666 phba->cfg_aer_support = 0; 4667 rc = strlen(buf); 4668 } 4669 break; 4670 case 1: 4671 if (!(phba->hba_flag & HBA_AER_ENABLED)) { 4672 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4673 if (!rc) { 4674 spin_lock_irq(&phba->hbalock); 4675 phba->hba_flag |= HBA_AER_ENABLED; 4676 spin_unlock_irq(&phba->hbalock); 4677 phba->cfg_aer_support = 1; 4678 rc = strlen(buf); 4679 } else 4680 rc = -EPERM; 4681 } else { 4682 phba->cfg_aer_support = 1; 4683 rc = strlen(buf); 4684 } 4685 break; 4686 default: 4687 rc = -EINVAL; 4688 break; 4689 } 4690 return rc; 4691 } 4692 4693 static DEVICE_ATTR_RW(lpfc_aer_support); 4694 4695 /** 4696 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device 4697 * @dev: class device that is converted into a Scsi_host. 4698 * @attr: device attribute, not used. 4699 * @buf: containing flag 1 for aer cleanup state. 4700 * @count: unused variable. 4701 * 4702 * Description: 4703 * If the @buf contains 1 and the device currently has the AER support 4704 * enabled, then invokes the kernel AER helper routine 4705 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable 4706 * error status register. 4707 * 4708 * Notes: 4709 * 4710 * Returns: 4711 * -EINVAL if the buf does not contain the 1 or the device is not currently 4712 * enabled with the AER support. 4713 **/ 4714 static ssize_t 4715 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, 4716 const char *buf, size_t count) 4717 { 4718 struct Scsi_Host *shost = class_to_shost(dev); 4719 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4720 struct lpfc_hba *phba = vport->phba; 4721 int val, rc = -1; 4722 4723 if (!isdigit(buf[0])) 4724 return -EINVAL; 4725 if (sscanf(buf, "%i", &val) != 1) 4726 return -EINVAL; 4727 if (val != 1) 4728 return -EINVAL; 4729 4730 if (phba->hba_flag & HBA_AER_ENABLED) 4731 rc = pci_aer_clear_nonfatal_status(phba->pcidev); 4732 4733 if (rc == 0) 4734 return strlen(buf); 4735 else 4736 return -EPERM; 4737 } 4738 4739 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, 4740 lpfc_aer_cleanup_state); 4741 4742 /** 4743 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions 4744 * 4745 * @dev: class device that is converted into a Scsi_host. 4746 * @attr: device attribute, not used. 4747 * @buf: containing the string the number of vfs to be enabled. 4748 * @count: unused variable. 
4749 *
4750 * Description:
4751 * When this api is called through the user sysfs interface, the driver
4752 * shall try to enable or disable SR-IOV virtual functions according to
4753 * the following:
4754 *
4755 * If zero virtual functions have been enabled on the physical function,
4756 * the driver shall invoke the pci enable virtual function api trying
4757 * to enable the virtual functions. If the nr_vfn provided is greater
4758 * than the maximum supported, the maximum virtual function number will
4759 * be used for invoking the api; otherwise, the nr_vfn provided shall
4760 * be used for invoking the api. If the api call returned success, the
4761 * actual number of virtual functions enabled will be set to the driver
4762 * cfg_sriov_nr_virtfn; otherwise, an error shall be returned and the
4763 * driver cfg_sriov_nr_virtfn remains zero.
4764 *
4765 * If a non-zero number of virtual functions have already been enabled on
4766 * the physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4767 * -EEXIST will be returned and the driver does nothing;
4768 *
4769 * If the nr_vfn provided is zero and a non-zero number of virtual functions
4770 * have been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4771 * disabling virtual function api shall be invoked to disable all the
4772 * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set to
4773 * zero. Otherwise, if zero virtual functions have been enabled, do
4774 * nothing.
4775 *
4776 * Returns:
4777 * length of the buf on success if val is in range and the intended mode
4778 * is supported.
4779 * -EINVAL if val out of range or intended mode is not supported.
4780 **/
4781 static ssize_t
4782 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4783 const char *buf, size_t count)
4784 {
4785 struct Scsi_Host *shost = class_to_shost(dev);
4786 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4787 struct lpfc_hba *phba = vport->phba;
4788 struct pci_dev *pdev = phba->pcidev;
4789 int val = 0, rc = -EINVAL;
4790 
4791 /* Sanity check on user data */
4792 if (!isdigit(buf[0]))
4793 return -EINVAL;
4794 if (sscanf(buf, "%i", &val) != 1)
4795 return -EINVAL;
4796 if (val < 0)
4797 return -EINVAL;
4798 
4799 /* Request disabling virtual functions */
4800 if (val == 0) {
4801 if (phba->cfg_sriov_nr_virtfn > 0) {
4802 pci_disable_sriov(pdev);
4803 phba->cfg_sriov_nr_virtfn = 0;
4804 }
4805 return strlen(buf);
4806 }
4807 
4808 /* Request enabling virtual functions */
4809 if (phba->cfg_sriov_nr_virtfn > 0) {
4810 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4811 "3018 There are %d virtual functions "
4812 "enabled on physical function.\n",
4813 phba->cfg_sriov_nr_virtfn);
4814 return -EEXIST;
4815 }
4816 
4817 if (val <= LPFC_MAX_VFN_PER_PFN)
4818 phba->cfg_sriov_nr_virtfn = val;
4819 else {
4820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4821 "3019 Enabling %d virtual functions is not "
4822 "allowed.\n", val);
4823 return -EINVAL;
4824 }
4825 
4826 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4827 if (rc) {
4828 phba->cfg_sriov_nr_virtfn = 0;
4829 rc = -EPERM;
4830 } else
4831 rc = strlen(buf);
4832 
4833 return rc;
4834 }
4835 
4836 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4837 "Enable PCIe device SR-IOV virtual fn");
4838 
4839 lpfc_param_show(sriov_nr_virtfn)
4840 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
4841 
4842 /**
4843 * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
4844 *
4845 * @dev: class device that is converted
into a Scsi_host.
4846 * @attr: device attribute, not used.
4847 * @buf: containing the string "1" to request the firmware upgrade.
4848 * @count: unused variable.
4849 *
4850 * Description:
4851 * Requests a Linux generic firmware upgrade (RUN_FW_UPGRADE) via lpfc_sli4_request_firmware_update().
4852 * Returns:
4853 * length of the buf on success if val is in range and the intended mode
4854 * is supported.
4855 * -EINVAL if val out of range or intended mode is not supported.
4856 **/
4857 static ssize_t
4858 lpfc_request_firmware_upgrade_store(struct device *dev,
4859 struct device_attribute *attr,
4860 const char *buf, size_t count)
4861 {
4862 struct Scsi_Host *shost = class_to_shost(dev);
4863 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4864 struct lpfc_hba *phba = vport->phba;
4865 int val = 0, rc;
4866 
4867 /* Sanity check on user data */
4868 if (!isdigit(buf[0]))
4869 return -EINVAL;
4870 if (sscanf(buf, "%i", &val) != 1)
4871 return -EINVAL;
4872 if (val != 1)
4873 return -EINVAL;
4874 
4875 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4876 if (rc)
4877 rc = -EPERM;
4878 else
4879 rc = strlen(buf);
4880 return rc;
4881 }
4882 
4883 static int lpfc_req_fw_upgrade;
4884 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4885 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4886 lpfc_param_show(request_firmware_upgrade)
4887 
4888 /**
4889 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4890 * @phba: lpfc_hba pointer.
4891 * @val: 0 or 1.
4892 *
4893 * Description:
4894 * Set the initial Linux generic firmware upgrade enable or disable flag.
4895 *
4896 * Returns:
4897 * zero if val saved.
4898 * -EINVAL val out of range
4899 **/
4900 static int
4901 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4902 {
4903 if (val >= 0 && val <= 1) {
4904 phba->cfg_request_firmware_upgrade = val;
4905 return 0;
4906 }
4907 return -EINVAL;
4908 }
4909 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4910 lpfc_request_firmware_upgrade_show,
4911 lpfc_request_firmware_upgrade_store);
4912 
4913 /**
4914 * lpfc_force_rscn_store
4915 *
4916 * @dev: class device that is converted into a Scsi_host.
4917 * @attr: device attribute, not used.
4918 * @buf: unused string
4919 * @count: unused variable.
4920 *
4921 * Description:
4922 * Force the switch to send an RSCN to all other NPorts in our zone.
4923 * If we are direct connect pt2pt, build the RSCN command ourselves
4924 * and send it to the other NPort. Not supported for private loop.
4925 *
4926 * Returns:
4927 * 0 - on success
4928 * -EIO - if command is not sent
4929 **/
4930 static ssize_t
4931 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4932 const char *buf, size_t count)
4933 {
4934 struct Scsi_Host *shost = class_to_shost(dev);
4935 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4936 int i;
4937 
4938 i = lpfc_issue_els_rscn(vport, 0);
4939 if (i)
4940 return -EIO;
4941 return strlen(buf);
4942 }
4943 
4944 /*
4945 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4946 * connected to the HBA.
4947 *
4948 * Value range is any ascii value
4949 */
4950 static int lpfc_force_rscn;
4951 module_param(lpfc_force_rscn, int, 0644);
4952 MODULE_PARM_DESC(lpfc_force_rscn,
4953 "Force an RSCN to be sent to all remote NPorts");
4954 lpfc_param_show(force_rscn)
4955 
4956 /**
4957 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4958 * @phba: lpfc_hba pointer.
4959 * @val: unused value.
4960 *
4961 * Returns:
4962 * zero if val saved.
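 *
 * Note: writing any value to the lpfc_force_rscn sysfs attribute triggers
 * the RSCN, for example (host0 is a hypothetical host instance):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_force_rscn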
4963 **/ 4964 static int 4965 lpfc_force_rscn_init(struct lpfc_hba *phba, int val) 4966 { 4967 return 0; 4968 } 4969 static DEVICE_ATTR_RW(lpfc_force_rscn); 4970 4971 /** 4972 * lpfc_fcp_imax_store 4973 * 4974 * @dev: class device that is converted into a Scsi_host. 4975 * @attr: device attribute, not used. 4976 * @buf: string with the number of fast-path FCP interrupts per second. 4977 * @count: unused variable. 4978 * 4979 * Description: 4980 * If val is in a valid range [636,651042], then set the adapter's 4981 * maximum number of fast-path FCP interrupts per second. 4982 * 4983 * Returns: 4984 * length of the buf on success if val is in range the intended mode 4985 * is supported. 4986 * -EINVAL if val out of range or intended mode is not supported. 4987 **/ 4988 static ssize_t 4989 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, 4990 const char *buf, size_t count) 4991 { 4992 struct Scsi_Host *shost = class_to_shost(dev); 4993 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4994 struct lpfc_hba *phba = vport->phba; 4995 struct lpfc_eq_intr_info *eqi; 4996 uint32_t usdelay; 4997 int val = 0, i; 4998 4999 /* fcp_imax is only valid for SLI4 */ 5000 if (phba->sli_rev != LPFC_SLI_REV4) 5001 return -EINVAL; 5002 5003 /* Sanity check on user data */ 5004 if (!isdigit(buf[0])) 5005 return -EINVAL; 5006 if (sscanf(buf, "%i", &val) != 1) 5007 return -EINVAL; 5008 5009 /* 5010 * Value range for the HBA is [5000,5000000] 5011 * The value for each EQ depends on how many EQs are configured. 5012 * Allow value == 0 5013 */ 5014 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) 5015 return -EINVAL; 5016 5017 phba->cfg_auto_imax = (val) ? 0 : 1; 5018 if (phba->cfg_fcp_imax && !val) { 5019 queue_delayed_work(phba->wq, &phba->eq_delay_work, 5020 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 5021 5022 for_each_present_cpu(i) { 5023 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 5024 eqi->icnt = 0; 5025 } 5026 } 5027 5028 phba->cfg_fcp_imax = (uint32_t)val; 5029 5030 if (phba->cfg_fcp_imax) 5031 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 5032 else 5033 usdelay = 0; 5034 5035 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT) 5036 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT, 5037 usdelay); 5038 5039 return strlen(buf); 5040 } 5041 5042 /* 5043 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second 5044 # for the HBA. 5045 # 5046 # Value range is [5,000 to 5,000,000]. Default value is 50,000. 5047 */ 5048 static int lpfc_fcp_imax = LPFC_DEF_IMAX; 5049 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR); 5050 MODULE_PARM_DESC(lpfc_fcp_imax, 5051 "Set the maximum number of FCP interrupts per second per HBA"); 5052 lpfc_param_show(fcp_imax) 5053 5054 /** 5055 * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable 5056 * @phba: lpfc_hba pointer. 5057 * @val: link speed value. 5058 * 5059 * Description: 5060 * If val is in a valid range [636,651042], then initialize the adapter's 5061 * maximum number of fast-path FCP interrupts per second. 5062 * 5063 * Returns: 5064 * zero if val saved. 
5065 * -EINVAL val out of range 5066 **/ 5067 static int 5068 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) 5069 { 5070 if (phba->sli_rev != LPFC_SLI_REV4) { 5071 phba->cfg_fcp_imax = 0; 5072 return 0; 5073 } 5074 5075 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || 5076 (val == 0)) { 5077 phba->cfg_fcp_imax = val; 5078 return 0; 5079 } 5080 5081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5082 "3016 lpfc_fcp_imax: %d out of range, using default\n", 5083 val); 5084 phba->cfg_fcp_imax = LPFC_DEF_IMAX; 5085 5086 return 0; 5087 } 5088 5089 static DEVICE_ATTR_RW(lpfc_fcp_imax); 5090 5091 /** 5092 * lpfc_cq_max_proc_limit_store 5093 * 5094 * @dev: class device that is converted into a Scsi_host. 5095 * @attr: device attribute, not used. 5096 * @buf: string with the cq max processing limit of cqes 5097 * @count: unused variable. 5098 * 5099 * Description: 5100 * If val is in a valid range, then set value on each cq 5101 * 5102 * Returns: 5103 * The length of the buf: if successful 5104 * -ERANGE: if val is not in the valid range 5105 * -EINVAL: if bad value format or intended mode is not supported. 5106 **/ 5107 static ssize_t 5108 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr, 5109 const char *buf, size_t count) 5110 { 5111 struct Scsi_Host *shost = class_to_shost(dev); 5112 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5113 struct lpfc_hba *phba = vport->phba; 5114 struct lpfc_queue *eq, *cq; 5115 unsigned long val; 5116 int i; 5117 5118 /* cq_max_proc_limit is only valid for SLI4 */ 5119 if (phba->sli_rev != LPFC_SLI_REV4) 5120 return -EINVAL; 5121 5122 /* Sanity check on user data */ 5123 if (!isdigit(buf[0])) 5124 return -EINVAL; 5125 if (kstrtoul(buf, 0, &val)) 5126 return -EINVAL; 5127 5128 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT) 5129 return -ERANGE; 5130 5131 phba->cfg_cq_max_proc_limit = (uint32_t)val; 5132 5133 /* set the values on the cq's */ 5134 for (i = 0; i < phba->cfg_irq_chann; i++) { 5135 /* Get the EQ corresponding to the IRQ vector */ 5136 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 5137 if (!eq) 5138 continue; 5139 5140 list_for_each_entry(cq, &eq->child_list, list) 5141 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 5142 cq->entry_count); 5143 } 5144 5145 return strlen(buf); 5146 } 5147 5148 /* 5149 * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an 5150 * itteration of CQ processing. 5151 */ 5152 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 5153 module_param(lpfc_cq_max_proc_limit, int, 0644); 5154 MODULE_PARM_DESC(lpfc_cq_max_proc_limit, 5155 "Set the maximum number CQEs processed in an iteration of " 5156 "CQ processing"); 5157 lpfc_param_show(cq_max_proc_limit) 5158 5159 /* 5160 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a 5161 * single handler call which should request a polled completion rather 5162 * than re-enabling interrupts. 5163 */ 5164 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, 5165 LPFC_CQ_MIN_THRESHOLD_TO_POLL, 5166 LPFC_CQ_MAX_THRESHOLD_TO_POLL, 5167 "CQE Processing Threshold to enable Polling"); 5168 5169 /** 5170 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit 5171 * @phba: lpfc_hba pointer. 5172 * @val: entry limit 5173 * 5174 * Description: 5175 * If val is in a valid range, then initialize the adapter's maximum 5176 * value. 5177 * 5178 * Returns: 5179 * Always returns 0 for success, even if value not always set to 5180 * requested value. 
If value out of range or not supported, will fall 5181 * back to default. 5182 **/ 5183 static int 5184 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) 5185 { 5186 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 5187 5188 if (phba->sli_rev != LPFC_SLI_REV4) 5189 return 0; 5190 5191 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { 5192 phba->cfg_cq_max_proc_limit = val; 5193 return 0; 5194 } 5195 5196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5197 "0371 lpfc_cq_max_proc_limit: %d out of range, using " 5198 "default\n", 5199 phba->cfg_cq_max_proc_limit); 5200 5201 return 0; 5202 } 5203 5204 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); 5205 5206 /** 5207 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity 5208 * @dev: class converted to a Scsi_host structure. 5209 * @attr: device attribute, not used. 5210 * @buf: on return contains text describing the state of the link. 5211 * 5212 * Returns: size of formatted string. 5213 **/ 5214 static ssize_t 5215 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, 5216 char *buf) 5217 { 5218 struct Scsi_Host *shost = class_to_shost(dev); 5219 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5220 struct lpfc_hba *phba = vport->phba; 5221 struct lpfc_vector_map_info *cpup; 5222 int len = 0; 5223 5224 if ((phba->sli_rev != LPFC_SLI_REV4) || 5225 (phba->intr_type != MSIX)) 5226 return len; 5227 5228 switch (phba->cfg_fcp_cpu_map) { 5229 case 0: 5230 len += scnprintf(buf + len, PAGE_SIZE-len, 5231 "fcp_cpu_map: No mapping (%d)\n", 5232 phba->cfg_fcp_cpu_map); 5233 return len; 5234 case 1: 5235 len += scnprintf(buf + len, PAGE_SIZE-len, 5236 "fcp_cpu_map: HBA centric mapping (%d): " 5237 "%d of %d CPUs online from %d possible CPUs\n", 5238 phba->cfg_fcp_cpu_map, num_online_cpus(), 5239 num_present_cpus(), 5240 phba->sli4_hba.num_possible_cpu); 5241 break; 5242 } 5243 5244 while (phba->sli4_hba.curr_disp_cpu < 5245 phba->sli4_hba.num_possible_cpu) { 5246 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; 5247 5248 if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) 5249 len += scnprintf(buf + len, PAGE_SIZE - len, 5250 "CPU %02d not present\n", 5251 phba->sli4_hba.curr_disp_cpu); 5252 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 5253 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5254 len += scnprintf( 5255 buf + len, PAGE_SIZE - len, 5256 "CPU %02d hdwq None " 5257 "physid %d coreid %d ht %d ua %d\n", 5258 phba->sli4_hba.curr_disp_cpu, 5259 cpup->phys_id, cpup->core_id, 5260 (cpup->flag & LPFC_CPU_MAP_HYPER), 5261 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5262 else 5263 len += scnprintf( 5264 buf + len, PAGE_SIZE - len, 5265 "CPU %02d EQ None hdwq %04d " 5266 "physid %d coreid %d ht %d ua %d\n", 5267 phba->sli4_hba.curr_disp_cpu, 5268 cpup->hdwq, cpup->phys_id, 5269 cpup->core_id, 5270 (cpup->flag & LPFC_CPU_MAP_HYPER), 5271 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5272 } else { 5273 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5274 len += scnprintf( 5275 buf + len, PAGE_SIZE - len, 5276 "CPU %02d hdwq None " 5277 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5278 phba->sli4_hba.curr_disp_cpu, 5279 cpup->phys_id, 5280 cpup->core_id, 5281 (cpup->flag & LPFC_CPU_MAP_HYPER), 5282 (cpup->flag & LPFC_CPU_MAP_UNASSIGN), 5283 lpfc_get_irq(cpup->eq)); 5284 else 5285 len += scnprintf( 5286 buf + len, PAGE_SIZE - len, 5287 "CPU %02d EQ %04d hdwq %04d " 5288 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5289 phba->sli4_hba.curr_disp_cpu, 5290 cpup->eq, cpup->hdwq, cpup->phys_id, 5291 
cpup->core_id,
5292 (cpup->flag & LPFC_CPU_MAP_HYPER),
5293 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5294 lpfc_get_irq(cpup->eq));
5295 }
5296 
5297 phba->sli4_hba.curr_disp_cpu++;
5298 
5299 /* display max number of CPUs keeping some margin */
5300 if (phba->sli4_hba.curr_disp_cpu <
5301 phba->sli4_hba.num_possible_cpu &&
5302 (len >= (PAGE_SIZE - 64))) {
5303 len += scnprintf(buf + len,
5304 PAGE_SIZE - len, "more...\n");
5305 break;
5306 }
5307 }
5308 
5309 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5310 phba->sli4_hba.curr_disp_cpu = 0;
5311 
5312 return len;
5313 }
5314 
5315 /**
5316 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5317 * @dev: class device that is converted into a Scsi_host.
5318 * @attr: device attribute, not used.
5319 * @buf: data buffer, not used.
5320 * @count: not used.
5321 *
5322 * Returns:
5323 * -EINVAL - Not implemented yet.
5324 **/
5325 static ssize_t
5326 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5327 const char *buf, size_t count)
5328 {
5329 return -EINVAL;
5330 }
5331 
5332 /*
5333 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5334 # for the HBA.
5335 #
5336 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5337 # 0 - Do not affinitize IRQ vectors
5338 # 1 - Affinitize HBA vectors with respect to each HBA
5339 # (start with CPU0 for each HBA)
5340 # This also defines how Hardware Queues are mapped to specific CPUs.
5341 */
5342 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5343 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5344 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5345 "Defines how to map CPUs to IRQ vectors per HBA");
5346 
5347 /**
5348 * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
5349 * @phba: lpfc_hba pointer.
5350 * @val: cpu map mode value.
5351 *
5352 * Description:
5353 * If val is in a valid range, then affinitize the adapter's
5354 * MSIX vectors accordingly.
5355 *
5356 * Returns:
5357 * zero if val saved.
5358 * Out-of-range values fall back to LPFC_HBA_CPU_MAP.
5359 **/
5360 static int
5361 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5362 {
5363 if (phba->sli_rev != LPFC_SLI_REV4) {
5364 phba->cfg_fcp_cpu_map = 0;
5365 return 0;
5366 }
5367 
5368 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5369 phba->cfg_fcp_cpu_map = val;
5370 return 0;
5371 }
5372 
5373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5374 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5375 "default\n", val);
5376 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5377 
5378 return 0;
5379 }
5380 
5381 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
5382 
5383 /*
5384 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5385 # Value range is [2,3]. Default value is 3.
5386 */
5387 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5388 "Select Fibre Channel class of service for FCP sequences");
5389 
5390 /*
5391 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5392 # is [0,1]. Default value is 1.
5393 */
5394 LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
5395 "Use ADISC on rediscovery to authenticate FCP devices");
5396 
5397 /*
5398 # lpfc_first_burst_size: First burst size to use on the NPorts
5399 # that support first burst.
5400 # Value range is [0,65536]. Default value is 0.
5401 */
5402 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5403 "First burst size for Targets that support first burst");
5404 
5405 /*
5406 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5407 * When the driver is configured as an NVME target, this value is 5408 * communicated to the NVME initiator in the PRLI response. It is 5409 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support 5410 * parameters are set and the target is sending the PRLI RSP. 5411 * Parameter supported on physical port only - no NPIV support. 5412 * Value range is [0,65536]. Default value is 0. 5413 */ 5414 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, 5415 "NVME Target mode first burst size in 512B increments."); 5416 5417 /* 5418 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. 5419 * For the Initiator (I), enabling this parameter means that an NVMET 5420 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be 5421 * processed by the initiator for subsequent NVME FCP IO. 5422 * Currently, this feature is not supported on the NVME target 5423 * Value range is [0,1]. Default value is 0 (disabled). 5424 */ 5425 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, 5426 "Enable First Burst feature for NVME Initiator."); 5427 5428 /* 5429 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue 5430 # depth. Default value is 0. When the value of this parameter is zero the 5431 # SCSI command completion time is not used for controlling I/O queue depth. When 5432 # the parameter is set to a non-zero value, the I/O queue depth is controlled 5433 # to limit the I/O completion time to the parameter value. 5434 # The value is set in milliseconds. 5435 */ 5436 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000, 5437 "Use command completion time to control queue depth"); 5438 5439 lpfc_vport_param_show(max_scsicmpl_time); 5440 static int 5441 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) 5442 { 5443 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5444 struct lpfc_nodelist *ndlp, *next_ndlp; 5445 5446 if (val == vport->cfg_max_scsicmpl_time) 5447 return 0; 5448 if ((val < 0) || (val > 60000)) 5449 return -EINVAL; 5450 vport->cfg_max_scsicmpl_time = val; 5451 5452 spin_lock_irq(shost->host_lock); 5453 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5454 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 5455 continue; 5456 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 5457 } 5458 spin_unlock_irq(shost->host_lock); 5459 return 0; 5460 } 5461 lpfc_vport_param_store(max_scsicmpl_time); 5462 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); 5463 5464 /* 5465 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value 5466 # range is [0,1]. Default value is 0. 5467 */ 5468 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); 5469 5470 /* 5471 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature 5472 # range is [0,1]. Default value is 1. 5473 */ 5474 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing"); 5475 5476 /* 5477 * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds 5478 * range is [0,1]. Default value is 0. 5479 * For [0], FCP commands are issued to Work Queues based on upper layer 5480 * hardware queue index. 5481 * For [1], FCP commands are issued to a Work Queue associated with the 5482 * current CPU. 5483 * 5484 * LPFC_FCP_SCHED_BY_HDWQ == 0 5485 * LPFC_FCP_SCHED_BY_CPU == 1 5486 * 5487 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu 5488 * affinity for FCP/NVME I/Os through Work Queues associated with the current 5489 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os 5490 * through WQs will be used. 
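 *
 * Illustrative usage only (the host instance below is hypothetical): the
 * policy can be chosen at load time with
 *     modprobe lpfc lpfc_fcp_io_sched=0
 * or changed on a running port through its sysfs attribute with
 *     echo 1 > /sys/class/scsi_host/host0/lpfc_fcp_io_sched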
5491 */
5492 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5493 	     LPFC_FCP_SCHED_BY_HDWQ,
5494 	     LPFC_FCP_SCHED_BY_CPU,
5495 	     "Determine scheduling algorithm for "
5496 	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5497 
5498 /*
5499  * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5500  * range is [0,1]. Default value is 0.
5501  * For [0], GID_FT is used for NameServer queries after RSCN (default)
5502  * For [1], GID_PT is used for NameServer queries after RSCN
5503  *
5504  */
5505 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5506 	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5507 	     "Determine algorithm for NameServer queries after RSCN "
5508 	     "[0] - GID_FT, [1] - GID_PT");
5509 
5510 /*
5511 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5512 # range is [0,1]. Default value is 0.
5513 # For [0], bus reset issues target reset to ALL devices
5514 # For [1], bus reset issues target reset to non-FCP2 devices
5515 */
5516 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5517 	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5518 
5519 
5520 /*
5521 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5522 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5523 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5524 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5525 # cr_delay is set to 0.
5526 */
5527 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5528 	     "interrupt response is generated");
5529 
5530 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5531 	     "interrupt response is generated");
5532 
5533 /*
5534 # lpfc_multi_ring_support: Determines how many rings to spread available
5535 # cmd/rsp IOCB entries across.
5536 # Value range is [1,2]. Default value is 1.
5537 */
5538 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5539 	    "SLI rings to spread IOCB entries across");
5540 
5541 /*
5542 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5543 # identifies what rctl value to configure the additional ring for.
5544 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5545 */
5546 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5547 	    255, "Identifies RCTL for additional ring configuration");
5548 
5549 /*
5550 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5551 # identifies what type value to configure the additional ring for.
5552 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5553 */
5554 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5555 	    255, "Identifies TYPE for additional ring configuration");
5556 
5557 /*
5558 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5559 # 0 = SmartSAN functionality disabled (default)
5560 # 1 = SmartSAN functionality enabled
5561 # This parameter will override the value of lpfc_fdmi_on module parameter.
5562 # Value range is [0,1]. Default value is 0.
5563 */
5564 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5565 
5566 /*
5567 # lpfc_fdmi_on: Controls FDMI support.
5568 # 0 No FDMI support
5569 # 1 Traditional FDMI support (default)
5570 # Traditional FDMI support means the driver will assume FDMI-2 support;
5571 # however, if that fails, it will fall back to FDMI-1.
5572 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5573 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of 5574 # lpfc_fdmi_on. 5575 # Value range [0,1]. Default value is 1. 5576 */ 5577 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); 5578 5579 /* 5580 # Specifies the maximum number of ELS cmds we can have outstanding (for 5581 # discovery). Value range is [1,64]. Default value = 32. 5582 */ 5583 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " 5584 "during discovery"); 5585 5586 /* 5587 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that 5588 # will be scanned by the SCSI midlayer when sequential scanning is 5589 # used; and is also the highest LUN ID allowed when the SCSI midlayer 5590 # parses REPORT_LUN responses. The lpfc driver has no LUN count or 5591 # LUN ID limit, but the SCSI midlayer requires this field for the uses 5592 # above. The lpfc driver limits the default value to 255 for two reasons. 5593 # As it bounds the sequential scan loop, scanning for thousands of luns 5594 # on a target can take minutes of wall clock time. Additionally, 5595 # there are FC targets, such as JBODs, that only recognize 8-bits of 5596 # LUN ID. When they receive a value greater than 8 bits, they chop off 5597 # the high order bits. In other words, they see LUN IDs 0, 256, 512, 5598 # and so on all as LUN ID 0. This causes the linux kernel, which sees 5599 # valid responses at each of the LUN IDs, to believe there are multiple 5600 # devices present, when in fact, there is only 1. 5601 # A customer that is aware of their target behaviors, and the results as 5602 # indicated above, is welcome to increase the lpfc_max_luns value. 5603 # As mentioned, this value is not used by the lpfc driver, only the 5604 # SCSI midlayer. 5605 # Value range is [0,65535]. Default value is 255. 5606 # NOTE: The SCSI layer might probe all allowed LUN on some old targets. 5607 */ 5608 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID"); 5609 5610 /* 5611 # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. 5612 # Value range is [1,255], default value is 10. 5613 */ 5614 LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 5615 "Milliseconds driver will wait between polling FCP ring"); 5616 5617 /* 5618 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands 5619 # to complete in seconds. Value range is [5,180], default value is 60. 5620 */ 5621 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, 5622 "Maximum time to wait for task management commands to complete"); 5623 /* 5624 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 5625 # support this feature 5626 # 0 = MSI disabled 5627 # 1 = MSI enabled 5628 # 2 = MSI-X enabled (default) 5629 # Value range is [0,2]. Default value is 2. 5630 */ 5631 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 5632 "MSI-X (2), if possible"); 5633 5634 /* 5635 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs 5636 * 5637 * 0 = NVME OAS disabled 5638 * 1 = NVME OAS enabled 5639 * 5640 * Value range is [0,1]. Default value is 0. 5641 */ 5642 LPFC_ATTR_RW(nvme_oas, 0, 0, 1, 5643 "Use OAS bit on NVME IOs"); 5644 5645 /* 5646 * lpfc_nvme_embed_cmd: Use the oas bit when sending NVME/NVMET IOs 5647 * 5648 * 0 = Put NVME Command in SGL 5649 * 1 = Embed NVME Command in WQE (unless G7) 5650 * 2 = Embed NVME Command in WQE (force) 5651 * 5652 * Value range is [0,2]. Default value is 1. 
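 *
 * Illustrative example only: to always place the NVME command in the SGL
 * instead of the WQE, the driver could be loaded with
 *     modprobe lpfc lpfc_nvme_embed_cmd=0
 * The attribute is also exposed read/write as
 * /sys/class/scsi_host/hostN/lpfc_nvme_embed_cmd (hostN is hypothetical).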
5653 */ 5654 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, 5655 "Embed NVME Command in WQE"); 5656 5657 /* 5658 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues 5659 * the driver will advertise it supports to the SCSI layer. 5660 * 5661 * 0 = Set nr_hw_queues by the number of CPUs or HW queues. 5662 * 1,256 = Manually specify nr_hw_queue value to be advertised, 5663 * 5664 * Value range is [0,256]. Default value is 8. 5665 */ 5666 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, 5667 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, 5668 "Set the number of SCSI Queues advertised"); 5669 5670 /* 5671 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5672 * will advertise it supports to the NVME and SCSI layers. This also 5673 * will map to the number of CQ/WQ pairs the driver will create. 5674 * 5675 * The NVME Layer will try to create this many, plus 1 administrative 5676 * hardware queue. The administrative queue will always map to WQ 0 5677 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. 5678 * 5679 * 0 = Configure the number of hdw queues to the number of active CPUs. 5680 * 1,256 = Manually specify how many hdw queues to use. 5681 * 5682 * Value range is [0,256]. Default value is 0. 5683 */ 5684 LPFC_ATTR_R(hdw_queue, 5685 LPFC_HBA_HDWQ_DEF, 5686 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, 5687 "Set the number of I/O Hardware Queues"); 5688 5689 #if IS_ENABLED(CONFIG_X86) 5690 /** 5691 * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on 5692 * irq_chann_mode 5693 * @phba: Pointer to HBA context object. 5694 **/ 5695 static void 5696 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) 5697 { 5698 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; 5699 const struct cpumask *sibling_mask; 5700 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; 5701 5702 cpumask_clear(aff_mask); 5703 5704 if (phba->irq_chann_mode == NUMA_MODE) { 5705 /* Check if we're a NUMA architecture */ 5706 numa_node = dev_to_node(&phba->pcidev->dev); 5707 if (numa_node == NUMA_NO_NODE) { 5708 phba->irq_chann_mode = NORMAL_MODE; 5709 return; 5710 } 5711 } 5712 5713 for_each_possible_cpu(cpu) { 5714 switch (phba->irq_chann_mode) { 5715 case NUMA_MODE: 5716 if (cpu_to_node(cpu) == numa_node) 5717 cpumask_set_cpu(cpu, aff_mask); 5718 break; 5719 case NHT_MODE: 5720 sibling_mask = topology_sibling_cpumask(cpu); 5721 first_cpu = cpumask_first(sibling_mask); 5722 if (first_cpu < nr_cpu_ids) 5723 cpumask_set_cpu(first_cpu, aff_mask); 5724 break; 5725 default: 5726 break; 5727 } 5728 } 5729 } 5730 #endif 5731 5732 static void 5733 lpfc_assign_default_irq_chann(struct lpfc_hba *phba) 5734 { 5735 #if IS_ENABLED(CONFIG_X86) 5736 switch (boot_cpu_data.x86_vendor) { 5737 case X86_VENDOR_AMD: 5738 /* If AMD architecture, then default is NUMA_MODE */ 5739 phba->irq_chann_mode = NUMA_MODE; 5740 break; 5741 case X86_VENDOR_INTEL: 5742 /* If Intel architecture, then default is no hyperthread mode */ 5743 phba->irq_chann_mode = NHT_MODE; 5744 break; 5745 default: 5746 phba->irq_chann_mode = NORMAL_MODE; 5747 break; 5748 } 5749 lpfc_cpumask_irq_mode_init(phba); 5750 #else 5751 phba->irq_chann_mode = NORMAL_MODE; 5752 #endif 5753 } 5754 5755 /* 5756 * lpfc_irq_chann: Set the number of IRQ vectors that are available 5757 * for Hardware Queues to utilize. This also will map to the number 5758 * of EQ / MSI-X vectors the driver will create. 
This should never be 5759 * more than the number of Hardware Queues 5760 * 5761 * 0 = Configure number of IRQ Channels to: 5762 * if AMD architecture, number of CPUs on HBA's NUMA node 5763 * if Intel architecture, number of physical CPUs. 5764 * otherwise, number of active CPUs. 5765 * [1,256] = Manually specify how many IRQ Channels to use. 5766 * 5767 * Value range is [0,256]. Default value is [0]. 5768 */ 5769 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; 5770 module_param(lpfc_irq_chann, uint, 0444); 5771 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); 5772 5773 /* lpfc_irq_chann_init - Set the hba irq_chann initial value 5774 * @phba: lpfc_hba pointer. 5775 * @val: contains the initial value 5776 * 5777 * Description: 5778 * Validates the initial value is within range and assigns it to the 5779 * adapter. If not in range, an error message is posted and the 5780 * default value is assigned. 5781 * 5782 * Returns: 5783 * zero if value is in range and is set 5784 * -EINVAL if value was out of range 5785 **/ 5786 static int 5787 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) 5788 { 5789 const struct cpumask *aff_mask; 5790 5791 if (phba->cfg_use_msi != 2) { 5792 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5793 "8532 use_msi = %u ignoring cfg_irq_numa\n", 5794 phba->cfg_use_msi); 5795 phba->irq_chann_mode = NORMAL_MODE; 5796 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5797 return 0; 5798 } 5799 5800 /* Check if default setting was passed */ 5801 if (val == LPFC_IRQ_CHANN_DEF && 5802 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF && 5803 phba->sli_rev == LPFC_SLI_REV4) 5804 lpfc_assign_default_irq_chann(phba); 5805 5806 if (phba->irq_chann_mode != NORMAL_MODE) { 5807 aff_mask = &phba->sli4_hba.irq_aff_mask; 5808 5809 if (cpumask_empty(aff_mask)) { 5810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5811 "8533 Could not identify CPUS for " 5812 "mode %d, ignoring\n", 5813 phba->irq_chann_mode); 5814 phba->irq_chann_mode = NORMAL_MODE; 5815 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5816 } else { 5817 phba->cfg_irq_chann = cpumask_weight(aff_mask); 5818 5819 /* If no hyperthread mode, then set hdwq count to 5820 * aff_mask weight as well 5821 */ 5822 if (phba->irq_chann_mode == NHT_MODE) 5823 phba->cfg_hdw_queue = phba->cfg_irq_chann; 5824 5825 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5826 "8543 lpfc_irq_chann set to %u " 5827 "(mode: %d)\n", phba->cfg_irq_chann, 5828 phba->irq_chann_mode); 5829 } 5830 } else { 5831 if (val > LPFC_IRQ_CHANN_MAX) { 5832 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5833 "8545 lpfc_irq_chann attribute cannot " 5834 "be set to %u, allowed range is " 5835 "[%u,%u]\n", 5836 val, 5837 LPFC_IRQ_CHANN_MIN, 5838 LPFC_IRQ_CHANN_MAX); 5839 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5840 return -EINVAL; 5841 } 5842 if (phba->sli_rev == LPFC_SLI_REV4) { 5843 phba->cfg_irq_chann = val; 5844 } else { 5845 phba->cfg_irq_chann = 2; 5846 phba->cfg_hdw_queue = 1; 5847 } 5848 } 5849 5850 return 0; 5851 } 5852 5853 /** 5854 * lpfc_irq_chann_show - Display value of irq_chann 5855 * @dev: class converted to a Scsi_host structure. 5856 * @attr: device attribute, not used. 5857 * @buf: on return contains a string with the list sizes 5858 * 5859 * Returns: size of formatted string. 
5860 **/ 5861 static ssize_t 5862 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, 5863 char *buf) 5864 { 5865 struct Scsi_Host *shost = class_to_shost(dev); 5866 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5867 struct lpfc_hba *phba = vport->phba; 5868 5869 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); 5870 } 5871 5872 static DEVICE_ATTR_RO(lpfc_irq_chann); 5873 5874 /* 5875 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 5876 # 0 = HBA resets disabled 5877 # 1 = HBA resets enabled (default) 5878 # 2 = HBA reset via PCI bus reset enabled 5879 # Value range is [0,2]. Default value is 1. 5880 */ 5881 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver."); 5882 5883 /* 5884 # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. 5885 # 0 = HBA Heartbeat disabled 5886 # 1 = HBA Heartbeat enabled (default) 5887 # Value range is [0,1]. Default value is 1. 5888 */ 5889 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); 5890 5891 /* 5892 # lpfc_EnableXLane: Enable Express Lane Feature 5893 # 0x0 Express Lane Feature disabled 5894 # 0x1 Express Lane Feature enabled 5895 # Value range is [0,1]. Default value is 0. 5896 */ 5897 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); 5898 5899 /* 5900 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature 5901 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) 5902 # Value range is [0x0,0x7f]. Default value is 0 5903 */ 5904 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); 5905 5906 /* 5907 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 5908 # 0 = BlockGuard disabled (default) 5909 # 1 = BlockGuard enabled 5910 # Value range is [0,1]. Default value is 0. 5911 */ 5912 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 5913 5914 /* 5915 # lpfc_prot_mask: 5916 # - Bit mask of host protection capabilities used to register with the 5917 # SCSI mid-layer 5918 # - Only meaningful if BG is turned on (lpfc_enable_bg=1). 5919 # - Allows you to ultimately specify which profiles to use 5920 # - Default will result in registering capabilities for all profiles. 5921 # - SHOST_DIF_TYPE1_PROTECTION 1 5922 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection 5923 # - SHOST_DIX_TYPE0_PROTECTION 8 5924 # HBA supports DIX Type 0: Host to HBA protection only 5925 # - SHOST_DIX_TYPE1_PROTECTION 16 5926 # HBA supports DIX Type 1: Host to HBA Type 1 protection 5927 # 5928 */ 5929 LPFC_ATTR(prot_mask, 5930 (SHOST_DIF_TYPE1_PROTECTION | 5931 SHOST_DIX_TYPE0_PROTECTION | 5932 SHOST_DIX_TYPE1_PROTECTION), 5933 0, 5934 (SHOST_DIF_TYPE1_PROTECTION | 5935 SHOST_DIX_TYPE0_PROTECTION | 5936 SHOST_DIX_TYPE1_PROTECTION), 5937 "T10-DIF host protection capabilities mask"); 5938 5939 /* 5940 # lpfc_prot_guard: 5941 # - Bit mask of protection guard types to register with the SCSI mid-layer 5942 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum 5943 # - Allows you to ultimately specify which profiles to use 5944 # - Default will result in registering capabilities for all guard types 5945 # 5946 */ 5947 LPFC_ATTR(prot_guard, 5948 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, 5949 "T10-DIF host protection guard type"); 5950 5951 /* 5952 * Delay initial NPort discovery when Clean Address bit is cleared in 5953 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. 5954 * This parameter can have value 0 or 1. 
5955 * When this parameter is set to 0, no delay is added to the initial 5956 * discovery. 5957 * When this parameter is set to non-zero value, initial Nport discovery is 5958 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC 5959 * accept and FCID/Fabric name/Fabric portname is changed. 5960 * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion 5961 * when Clean Address bit is cleared in FLOGI/FDISC 5962 * accept and FCID/Fabric name/Fabric portname is changed. 5963 * Default value is 0. 5964 */ 5965 LPFC_ATTR(delay_discovery, 0, 0, 1, 5966 "Delay NPort discovery when Clean Address bit is cleared."); 5967 5968 /* 5969 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count 5970 * This value can be set to values between 64 and 4096. The default value 5971 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi 5972 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE). 5973 * Because of the additional overhead involved in setting up T10-DIF, 5974 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 5975 * and will be limited to 512 if BlockGuard is enabled under SLI3. 5976 */ 5977 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 5978 module_param(lpfc_sg_seg_cnt, uint, 0444); 5979 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count"); 5980 5981 /** 5982 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes 5983 * configured for the adapter 5984 * @dev: class converted to a Scsi_host structure. 5985 * @attr: device attribute, not used. 5986 * @buf: on return contains a string with the list sizes 5987 * 5988 * Returns: size of formatted string. 5989 **/ 5990 static ssize_t 5991 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, 5992 char *buf) 5993 { 5994 struct Scsi_Host *shost = class_to_shost(dev); 5995 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5996 struct lpfc_hba *phba = vport->phba; 5997 int len; 5998 5999 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 6000 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 6001 6002 len += scnprintf(buf + len, PAGE_SIZE - len, 6003 "Cfg: %d SCSI: %d NVME: %d\n", 6004 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 6005 phba->cfg_nvme_seg_cnt); 6006 return len; 6007 } 6008 6009 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); 6010 6011 /** 6012 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value 6013 * @phba: lpfc_hba pointer. 6014 * @val: contains the initial value 6015 * 6016 * Description: 6017 * Validates the initial value is within range and assigns it to the 6018 * adapter. If not in range, an error message is posted and the 6019 * default value is assigned. 6020 * 6021 * Returns: 6022 * zero if value is in range and is set 6023 * -EINVAL if value was out of range 6024 **/ 6025 static int 6026 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) 6027 { 6028 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) { 6029 phba->cfg_sg_seg_cnt = val; 6030 return 0; 6031 } 6032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6033 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, " 6034 "allowed range is [%d, %d]\n", 6035 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT); 6036 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 6037 return -EINVAL; 6038 } 6039 6040 /* 6041 * lpfc_enable_mds_diags: Enable MDS Diagnostics 6042 * 0 = MDS Diagnostics disabled (default) 6043 * 1 = MDS Diagnostics enabled 6044 * Value range is [0,1]. Default value is 0. 
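 *
 * Illustrative only: MDS diagnostics can be toggled on a running port
 * through the sysfs attribute, e.g.
 *     echo 1 > /sys/class/scsi_host/host0/lpfc_enable_mds_diags
 * where host0 is a hypothetical host instance.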
6045 */ 6046 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); 6047 6048 /* 6049 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size 6050 * 0 = Disable firmware logging (default) 6051 * [1-4] = Multiple of 1/4th Mb of host memory for FW logging 6052 * Value range [0..4]. Default value is 0 6053 */ 6054 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); 6055 lpfc_param_show(ras_fwlog_buffsize); 6056 6057 static ssize_t 6058 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) 6059 { 6060 int ret = 0; 6061 enum ras_state state; 6062 6063 if (!lpfc_rangecheck(val, 0, 4)) 6064 return -EINVAL; 6065 6066 if (phba->cfg_ras_fwlog_buffsize == val) 6067 return 0; 6068 6069 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) 6070 return -EINVAL; 6071 6072 spin_lock_irq(&phba->hbalock); 6073 state = phba->ras_fwlog.state; 6074 spin_unlock_irq(&phba->hbalock); 6075 6076 if (state == REG_INPROGRESS) { 6077 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " 6078 "registration is in progress\n"); 6079 return -EBUSY; 6080 } 6081 6082 /* For disable logging: stop the logs and free the DMA. 6083 * For ras_fwlog_buffsize size change we still need to free and 6084 * reallocate the DMA in lpfc_sli4_ras_fwlog_init. 6085 */ 6086 phba->cfg_ras_fwlog_buffsize = val; 6087 if (state == ACTIVE) { 6088 lpfc_ras_stop_fwlog(phba); 6089 lpfc_sli4_ras_dma_free(phba); 6090 } 6091 6092 lpfc_sli4_ras_init(phba); 6093 if (phba->ras_fwlog.ras_enabled) 6094 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6095 LPFC_RAS_ENABLE_LOGGING); 6096 return ret; 6097 } 6098 6099 lpfc_param_store(ras_fwlog_buffsize); 6100 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); 6101 6102 /* 6103 * lpfc_ras_fwlog_level: Firmware logging verbosity level 6104 * Valid only if firmware logging is enabled 6105 * 0(Least Verbosity) 4 (most verbosity) 6106 * Value range is [0..4]. Default value is 0 6107 */ 6108 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level"); 6109 6110 /* 6111 * lpfc_ras_fwlog_func: Firmware logging enabled on function number 6112 * Default function which has RAS support : 0 6113 * Value Range is [0..7]. 6114 * FW logging is a global action and enablement is via a specific 6115 * port. 6116 */ 6117 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); 6118 6119 /* 6120 * lpfc_enable_bbcr: Enable BB Credit Recovery 6121 * 0 = BB Credit Recovery disabled 6122 * 1 = BB Credit Recovery enabled (default) 6123 * Value range is [0,1]. Default value is 1. 6124 */ 6125 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); 6126 6127 /* Signaling module parameters */ 6128 int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ 6129 module_param(lpfc_fabric_cgn_frequency, int, 0444); 6130 MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); 6131 6132 int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ 6133 module_param(lpfc_acqe_cgn_frequency, int, 0444); 6134 MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); 6135 6136 int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ 6137 module_param(lpfc_use_cgn_signal, int, 0444); 6138 MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available"); 6139 6140 /* 6141 * lpfc_enable_dpp: Enable DPP on G7 6142 * 0 = DPP on G7 disabled 6143 * 1 = DPP on G7 enabled (default) 6144 * Value range is [0,1]. Default value is 1. 
6145 */ 6146 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); 6147 6148 /* 6149 * lpfc_enable_mi: Enable FDMI MIB 6150 * 0 = disabled 6151 * 1 = enabled (default) 6152 * Value range is [0,1]. 6153 */ 6154 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI"); 6155 6156 /* 6157 * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if 6158 * either vmid_app_header or vmid_priority_tagging is enabled. 6159 * 4 - 255 = vmid support enabled for 4-255 VMs 6160 * Value range is [4,255]. 6161 */ 6162 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID, 6163 "Maximum number of VMs supported"); 6164 6165 /* 6166 * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours 6167 * 0 = Timeout is disabled 6168 * Value range is [0,24]. 6169 */ 6170 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24, 6171 "Inactivity timeout in hours"); 6172 6173 /* 6174 * lpfc_vmid_app_header: Enable App Header VMID support 6175 * 0 = Support is disabled (default) 6176 * 1 = Support is enabled 6177 * Value range is [0,1]. 6178 */ 6179 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE, 6180 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE, 6181 "Enable App Header VMID support"); 6182 6183 /* 6184 * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support 6185 * 0 = Support is disabled (default) 6186 * 1 = Allow supported targets only 6187 * 2 = Allow all targets 6188 * Value range is [0,2]. 6189 */ 6190 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE, 6191 LPFC_VMID_PRIO_TAG_DISABLE, 6192 LPFC_VMID_PRIO_TAG_ALL_TARGETS, 6193 "Enable Priority Tagging VMID support"); 6194 6195 static struct attribute *lpfc_hba_attrs[] = { 6196 &dev_attr_nvme_info.attr, 6197 &dev_attr_scsi_stat.attr, 6198 &dev_attr_bg_info.attr, 6199 &dev_attr_bg_guard_err.attr, 6200 &dev_attr_bg_apptag_err.attr, 6201 &dev_attr_bg_reftag_err.attr, 6202 &dev_attr_info.attr, 6203 &dev_attr_serialnum.attr, 6204 &dev_attr_modeldesc.attr, 6205 &dev_attr_modelname.attr, 6206 &dev_attr_programtype.attr, 6207 &dev_attr_portnum.attr, 6208 &dev_attr_fwrev.attr, 6209 &dev_attr_hdw.attr, 6210 &dev_attr_option_rom_version.attr, 6211 &dev_attr_link_state.attr, 6212 &dev_attr_num_discovered_ports.attr, 6213 &dev_attr_menlo_mgmt_mode.attr, 6214 &dev_attr_lpfc_drvr_version.attr, 6215 &dev_attr_lpfc_enable_fip.attr, 6216 &dev_attr_lpfc_temp_sensor.attr, 6217 &dev_attr_lpfc_log_verbose.attr, 6218 &dev_attr_lpfc_lun_queue_depth.attr, 6219 &dev_attr_lpfc_tgt_queue_depth.attr, 6220 &dev_attr_lpfc_hba_queue_depth.attr, 6221 &dev_attr_lpfc_peer_port_login.attr, 6222 &dev_attr_lpfc_nodev_tmo.attr, 6223 &dev_attr_lpfc_devloss_tmo.attr, 6224 &dev_attr_lpfc_enable_fc4_type.attr, 6225 &dev_attr_lpfc_fcp_class.attr, 6226 &dev_attr_lpfc_use_adisc.attr, 6227 &dev_attr_lpfc_first_burst_size.attr, 6228 &dev_attr_lpfc_ack0.attr, 6229 &dev_attr_lpfc_xri_rebalancing.attr, 6230 &dev_attr_lpfc_topology.attr, 6231 &dev_attr_lpfc_scan_down.attr, 6232 &dev_attr_lpfc_link_speed.attr, 6233 &dev_attr_lpfc_fcp_io_sched.attr, 6234 &dev_attr_lpfc_ns_query.attr, 6235 &dev_attr_lpfc_fcp2_no_tgt_reset.attr, 6236 &dev_attr_lpfc_cr_delay.attr, 6237 &dev_attr_lpfc_cr_count.attr, 6238 &dev_attr_lpfc_multi_ring_support.attr, 6239 &dev_attr_lpfc_multi_ring_rctl.attr, 6240 &dev_attr_lpfc_multi_ring_type.attr, 6241 &dev_attr_lpfc_fdmi_on.attr, 6242 &dev_attr_lpfc_enable_SmartSAN.attr, 6243 &dev_attr_lpfc_max_luns.attr, 6244 &dev_attr_lpfc_enable_npiv.attr, 6245 &dev_attr_lpfc_fcf_failover_policy.attr, 6246 
&dev_attr_lpfc_enable_rrq.attr, 6247 &dev_attr_lpfc_fcp_wait_abts_rsp.attr, 6248 &dev_attr_nport_evt_cnt.attr, 6249 &dev_attr_board_mode.attr, 6250 &dev_attr_max_vpi.attr, 6251 &dev_attr_used_vpi.attr, 6252 &dev_attr_max_rpi.attr, 6253 &dev_attr_used_rpi.attr, 6254 &dev_attr_max_xri.attr, 6255 &dev_attr_used_xri.attr, 6256 &dev_attr_npiv_info.attr, 6257 &dev_attr_issue_reset.attr, 6258 &dev_attr_lpfc_poll.attr, 6259 &dev_attr_lpfc_poll_tmo.attr, 6260 &dev_attr_lpfc_task_mgmt_tmo.attr, 6261 &dev_attr_lpfc_use_msi.attr, 6262 &dev_attr_lpfc_nvme_oas.attr, 6263 &dev_attr_lpfc_nvme_embed_cmd.attr, 6264 &dev_attr_lpfc_fcp_imax.attr, 6265 &dev_attr_lpfc_force_rscn.attr, 6266 &dev_attr_lpfc_cq_poll_threshold.attr, 6267 &dev_attr_lpfc_cq_max_proc_limit.attr, 6268 &dev_attr_lpfc_fcp_cpu_map.attr, 6269 &dev_attr_lpfc_fcp_mq_threshold.attr, 6270 &dev_attr_lpfc_hdw_queue.attr, 6271 &dev_attr_lpfc_irq_chann.attr, 6272 &dev_attr_lpfc_suppress_rsp.attr, 6273 &dev_attr_lpfc_nvmet_mrq.attr, 6274 &dev_attr_lpfc_nvmet_mrq_post.attr, 6275 &dev_attr_lpfc_nvme_enable_fb.attr, 6276 &dev_attr_lpfc_nvmet_fb_size.attr, 6277 &dev_attr_lpfc_enable_bg.attr, 6278 &dev_attr_lpfc_enable_hba_reset.attr, 6279 &dev_attr_lpfc_enable_hba_heartbeat.attr, 6280 &dev_attr_lpfc_EnableXLane.attr, 6281 &dev_attr_lpfc_XLanePriority.attr, 6282 &dev_attr_lpfc_xlane_lun.attr, 6283 &dev_attr_lpfc_xlane_tgt.attr, 6284 &dev_attr_lpfc_xlane_vpt.attr, 6285 &dev_attr_lpfc_xlane_lun_state.attr, 6286 &dev_attr_lpfc_xlane_lun_status.attr, 6287 &dev_attr_lpfc_xlane_priority.attr, 6288 &dev_attr_lpfc_sg_seg_cnt.attr, 6289 &dev_attr_lpfc_max_scsicmpl_time.attr, 6290 &dev_attr_lpfc_stat_data_ctrl.attr, 6291 &dev_attr_lpfc_aer_support.attr, 6292 &dev_attr_lpfc_aer_state_cleanup.attr, 6293 &dev_attr_lpfc_sriov_nr_virtfn.attr, 6294 &dev_attr_lpfc_req_fw_upgrade.attr, 6295 &dev_attr_lpfc_suppress_link_up.attr, 6296 &dev_attr_iocb_hw.attr, 6297 &dev_attr_pls.attr, 6298 &dev_attr_pt.attr, 6299 &dev_attr_txq_hw.attr, 6300 &dev_attr_txcmplq_hw.attr, 6301 &dev_attr_lpfc_sriov_hw_max_virtfn.attr, 6302 &dev_attr_protocol.attr, 6303 &dev_attr_lpfc_xlane_supported.attr, 6304 &dev_attr_lpfc_enable_mds_diags.attr, 6305 &dev_attr_lpfc_ras_fwlog_buffsize.attr, 6306 &dev_attr_lpfc_ras_fwlog_level.attr, 6307 &dev_attr_lpfc_ras_fwlog_func.attr, 6308 &dev_attr_lpfc_enable_bbcr.attr, 6309 &dev_attr_lpfc_enable_dpp.attr, 6310 &dev_attr_lpfc_enable_mi.attr, 6311 &dev_attr_cmf_info.attr, 6312 &dev_attr_lpfc_max_vmid.attr, 6313 &dev_attr_lpfc_vmid_inactivity_timeout.attr, 6314 &dev_attr_lpfc_vmid_app_header.attr, 6315 &dev_attr_lpfc_vmid_priority_tagging.attr, 6316 NULL, 6317 }; 6318 6319 static const struct attribute_group lpfc_hba_attr_group = { 6320 .attrs = lpfc_hba_attrs 6321 }; 6322 6323 const struct attribute_group *lpfc_hba_groups[] = { 6324 &lpfc_hba_attr_group, 6325 NULL 6326 }; 6327 6328 static struct attribute *lpfc_vport_attrs[] = { 6329 &dev_attr_info.attr, 6330 &dev_attr_link_state.attr, 6331 &dev_attr_num_discovered_ports.attr, 6332 &dev_attr_lpfc_drvr_version.attr, 6333 &dev_attr_lpfc_log_verbose.attr, 6334 &dev_attr_lpfc_lun_queue_depth.attr, 6335 &dev_attr_lpfc_tgt_queue_depth.attr, 6336 &dev_attr_lpfc_nodev_tmo.attr, 6337 &dev_attr_lpfc_devloss_tmo.attr, 6338 &dev_attr_lpfc_hba_queue_depth.attr, 6339 &dev_attr_lpfc_peer_port_login.attr, 6340 &dev_attr_lpfc_restrict_login.attr, 6341 &dev_attr_lpfc_fcp_class.attr, 6342 &dev_attr_lpfc_use_adisc.attr, 6343 &dev_attr_lpfc_first_burst_size.attr, 6344 &dev_attr_lpfc_max_luns.attr, 6345 
&dev_attr_nport_evt_cnt.attr, 6346 &dev_attr_npiv_info.attr, 6347 &dev_attr_lpfc_enable_da_id.attr, 6348 &dev_attr_lpfc_max_scsicmpl_time.attr, 6349 &dev_attr_lpfc_stat_data_ctrl.attr, 6350 &dev_attr_lpfc_static_vport.attr, 6351 &dev_attr_cmf_info.attr, 6352 NULL, 6353 }; 6354 6355 static const struct attribute_group lpfc_vport_attr_group = { 6356 .attrs = lpfc_vport_attrs 6357 }; 6358 6359 const struct attribute_group *lpfc_vport_groups[] = { 6360 &lpfc_vport_attr_group, 6361 NULL 6362 }; 6363 6364 /** 6365 * sysfs_ctlreg_write - Write method for writing to ctlreg 6366 * @filp: open sysfs file 6367 * @kobj: kernel kobject that contains the kernel class device. 6368 * @bin_attr: kernel attributes passed to us. 6369 * @buf: contains the data to be written to the adapter IOREG space. 6370 * @off: offset into buffer to beginning of data. 6371 * @count: bytes to transfer. 6372 * 6373 * Description: 6374 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6375 * Uses the adapter io control registers to send buf contents to the adapter. 6376 * 6377 * Returns: 6378 * -ERANGE off and count combo out of range 6379 * -EINVAL off, count or buff address invalid 6380 * -EPERM adapter is offline 6381 * value of count, buf contents written 6382 **/ 6383 static ssize_t 6384 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, 6385 struct bin_attribute *bin_attr, 6386 char *buf, loff_t off, size_t count) 6387 { 6388 size_t buf_off; 6389 struct device *dev = container_of(kobj, struct device, kobj); 6390 struct Scsi_Host *shost = class_to_shost(dev); 6391 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6392 struct lpfc_hba *phba = vport->phba; 6393 6394 if (phba->sli_rev >= LPFC_SLI_REV4) 6395 return -EPERM; 6396 6397 if ((off + count) > FF_REG_AREA_SIZE) 6398 return -ERANGE; 6399 6400 if (count <= LPFC_REG_WRITE_KEY_SIZE) 6401 return 0; 6402 6403 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6404 return -EINVAL; 6405 6406 /* This is to protect HBA registers from accidental writes. */ 6407 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) 6408 return -EINVAL; 6409 6410 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 6411 return -EPERM; 6412 6413 spin_lock_irq(&phba->hbalock); 6414 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; 6415 buf_off += sizeof(uint32_t)) 6416 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), 6417 phba->ctrl_regs_memmap_p + off + buf_off); 6418 6419 spin_unlock_irq(&phba->hbalock); 6420 6421 return count; 6422 } 6423 6424 /** 6425 * sysfs_ctlreg_read - Read method for reading from ctlreg 6426 * @filp: open sysfs file 6427 * @kobj: kernel kobject that contains the kernel class device. 6428 * @bin_attr: kernel attributes passed to us. 6429 * @buf: if successful contains the data from the adapter IOREG space. 6430 * @off: offset into buffer to beginning of data. 6431 * @count: bytes to transfer. 6432 * 6433 * Description: 6434 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6435 * Uses the adapter io control registers to read data into buf. 
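 * Access is limited to SLI-3 adapters; SLI-4 ports reject the read with
 * -EPERM. Offset and length must be 4-byte aligned.
 *
 * Illustrative usage only (host0 is a hypothetical host instance):
 *     dd if=/sys/class/scsi_host/host0/ctlreg bs=4 count=1 2>/dev/null | xxd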
6436 * 6437 * Returns: 6438 * -ERANGE off and count combo out of range 6439 * -EINVAL off, count or buff address invalid 6440 * value of count, buf contents read 6441 **/ 6442 static ssize_t 6443 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, 6444 struct bin_attribute *bin_attr, 6445 char *buf, loff_t off, size_t count) 6446 { 6447 size_t buf_off; 6448 uint32_t * tmp_ptr; 6449 struct device *dev = container_of(kobj, struct device, kobj); 6450 struct Scsi_Host *shost = class_to_shost(dev); 6451 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6452 struct lpfc_hba *phba = vport->phba; 6453 6454 if (phba->sli_rev >= LPFC_SLI_REV4) 6455 return -EPERM; 6456 6457 if (off > FF_REG_AREA_SIZE) 6458 return -ERANGE; 6459 6460 if ((off + count) > FF_REG_AREA_SIZE) 6461 count = FF_REG_AREA_SIZE - off; 6462 6463 if (count == 0) return 0; 6464 6465 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6466 return -EINVAL; 6467 6468 spin_lock_irq(&phba->hbalock); 6469 6470 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { 6471 tmp_ptr = (uint32_t *)(buf + buf_off); 6472 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); 6473 } 6474 6475 spin_unlock_irq(&phba->hbalock); 6476 6477 return count; 6478 } 6479 6480 static struct bin_attribute sysfs_ctlreg_attr = { 6481 .attr = { 6482 .name = "ctlreg", 6483 .mode = S_IRUSR | S_IWUSR, 6484 }, 6485 .size = 256, 6486 .read = sysfs_ctlreg_read, 6487 .write = sysfs_ctlreg_write, 6488 }; 6489 6490 /** 6491 * sysfs_mbox_write - Write method for writing information via mbox 6492 * @filp: open sysfs file 6493 * @kobj: kernel kobject that contains the kernel class device. 6494 * @bin_attr: kernel attributes passed to us. 6495 * @buf: contains the data to be written to sysfs mbox. 6496 * @off: offset into buffer to beginning of data. 6497 * @count: bytes to transfer. 6498 * 6499 * Description: 6500 * Deprecated function. All mailbox access from user space is performed via the 6501 * bsg interface. 6502 * 6503 * Returns: 6504 * -EPERM operation not permitted 6505 **/ 6506 static ssize_t 6507 sysfs_mbox_write(struct file *filp, struct kobject *kobj, 6508 struct bin_attribute *bin_attr, 6509 char *buf, loff_t off, size_t count) 6510 { 6511 return -EPERM; 6512 } 6513 6514 /** 6515 * sysfs_mbox_read - Read method for reading information via mbox 6516 * @filp: open sysfs file 6517 * @kobj: kernel kobject that contains the kernel class device. 6518 * @bin_attr: kernel attributes passed to us. 6519 * @buf: contains the data to be read from sysfs mbox. 6520 * @off: offset into buffer to beginning of data. 6521 * @count: bytes to transfer. 6522 * 6523 * Description: 6524 * Deprecated function. All mailbox access from user space is performed via the 6525 * bsg interface. 6526 * 6527 * Returns: 6528 * -EPERM operation not permitted 6529 **/ 6530 static ssize_t 6531 sysfs_mbox_read(struct file *filp, struct kobject *kobj, 6532 struct bin_attribute *bin_attr, 6533 char *buf, loff_t off, size_t count) 6534 { 6535 return -EPERM; 6536 } 6537 6538 static struct bin_attribute sysfs_mbox_attr = { 6539 .attr = { 6540 .name = "mbox", 6541 .mode = S_IRUSR | S_IWUSR, 6542 }, 6543 .size = MAILBOX_SYSFS_MAX, 6544 .read = sysfs_mbox_read, 6545 .write = sysfs_mbox_write, 6546 }; 6547 6548 /** 6549 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries 6550 * @vport: address of lpfc vport structure. 
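 *
 * Description:
 * Creates the drvr_stat_data binary attribute for every port; for physical
 * ports it additionally creates the ctlreg and mbox binary attributes,
 * while virtual (NPIV) ports skip those two.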
6551 * 6552 * Return codes: 6553 * zero on success 6554 * error return code from sysfs_create_bin_file() 6555 **/ 6556 int 6557 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) 6558 { 6559 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6560 int error; 6561 6562 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6563 &sysfs_drvr_stat_data_attr); 6564 6565 /* Virtual ports do not need ctrl_reg and mbox */ 6566 if (error || vport->port_type == LPFC_NPIV_PORT) 6567 goto out; 6568 6569 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6570 &sysfs_ctlreg_attr); 6571 if (error) 6572 goto out_remove_stat_attr; 6573 6574 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6575 &sysfs_mbox_attr); 6576 if (error) 6577 goto out_remove_ctlreg_attr; 6578 6579 return 0; 6580 out_remove_ctlreg_attr: 6581 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6582 out_remove_stat_attr: 6583 sysfs_remove_bin_file(&shost->shost_dev.kobj, 6584 &sysfs_drvr_stat_data_attr); 6585 out: 6586 return error; 6587 } 6588 6589 /** 6590 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries 6591 * @vport: address of lpfc vport structure. 6592 **/ 6593 void 6594 lpfc_free_sysfs_attr(struct lpfc_vport *vport) 6595 { 6596 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6597 sysfs_remove_bin_file(&shost->shost_dev.kobj, 6598 &sysfs_drvr_stat_data_attr); 6599 /* Virtual ports do not need ctrl_reg and mbox */ 6600 if (vport->port_type == LPFC_NPIV_PORT) 6601 return; 6602 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 6603 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6604 } 6605 6606 /* 6607 * Dynamic FC Host Attributes Support 6608 */ 6609 6610 /** 6611 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host 6612 * @shost: kernel scsi host pointer. 6613 **/ 6614 static void 6615 lpfc_get_host_symbolic_name(struct Scsi_Host *shost) 6616 { 6617 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 6618 6619 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 6620 sizeof fc_host_symbolic_name(shost)); 6621 } 6622 6623 /** 6624 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id 6625 * @shost: kernel scsi host pointer. 6626 **/ 6627 static void 6628 lpfc_get_host_port_id(struct Scsi_Host *shost) 6629 { 6630 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6631 6632 /* note: fc_myDID already in cpu endianness */ 6633 fc_host_port_id(shost) = vport->fc_myDID; 6634 } 6635 6636 /** 6637 * lpfc_get_host_port_type - Set the value of the scsi host port type 6638 * @shost: kernel scsi host pointer. 
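 *
 * Description:
 * Reports FC_PORTTYPE_NPIV for vports; for loop topologies, NL_Port
 * (public loop) or L_Port (private loop); for non-loop links, N_Port
 * (fabric) or point-to-point; FC_PORTTYPE_UNKNOWN when the link is down.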
6639 **/ 6640 static void 6641 lpfc_get_host_port_type(struct Scsi_Host *shost) 6642 { 6643 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6644 struct lpfc_hba *phba = vport->phba; 6645 6646 spin_lock_irq(shost->host_lock); 6647 6648 if (vport->port_type == LPFC_NPIV_PORT) { 6649 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 6650 } else if (lpfc_is_link_up(phba)) { 6651 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6652 if (vport->fc_flag & FC_PUBLIC_LOOP) 6653 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 6654 else 6655 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 6656 } else { 6657 if (vport->fc_flag & FC_FABRIC) 6658 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 6659 else 6660 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 6661 } 6662 } else 6663 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 6664 6665 spin_unlock_irq(shost->host_lock); 6666 } 6667 6668 /** 6669 * lpfc_get_host_port_state - Set the value of the scsi host port state 6670 * @shost: kernel scsi host pointer. 6671 **/ 6672 static void 6673 lpfc_get_host_port_state(struct Scsi_Host *shost) 6674 { 6675 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6676 struct lpfc_hba *phba = vport->phba; 6677 6678 spin_lock_irq(shost->host_lock); 6679 6680 if (vport->fc_flag & FC_OFFLINE_MODE) 6681 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 6682 else { 6683 switch (phba->link_state) { 6684 case LPFC_LINK_UNKNOWN: 6685 case LPFC_LINK_DOWN: 6686 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 6687 break; 6688 case LPFC_LINK_UP: 6689 case LPFC_CLEAR_LA: 6690 case LPFC_HBA_READY: 6691 /* Links up, reports port state accordingly */ 6692 if (vport->port_state < LPFC_VPORT_READY) 6693 fc_host_port_state(shost) = 6694 FC_PORTSTATE_BYPASSED; 6695 else 6696 fc_host_port_state(shost) = 6697 FC_PORTSTATE_ONLINE; 6698 break; 6699 case LPFC_HBA_ERROR: 6700 fc_host_port_state(shost) = FC_PORTSTATE_ERROR; 6701 break; 6702 default: 6703 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 6704 break; 6705 } 6706 } 6707 6708 spin_unlock_irq(shost->host_lock); 6709 } 6710 6711 /** 6712 * lpfc_get_host_speed - Set the value of the scsi host speed 6713 * @shost: kernel scsi host pointer. 
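 *
 * Description:
 * Translates the driver's current link speed into the transport's
 * FC_PORTSPEED_* values, using the native FC speeds when the link is
 * Fibre Channel and the FCoE speeds when HBA_FCOE_MODE is set; a down
 * link or an unrecognized speed reports FC_PORTSPEED_UNKNOWN.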
6714 **/ 6715 static void 6716 lpfc_get_host_speed(struct Scsi_Host *shost) 6717 { 6718 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6719 struct lpfc_hba *phba = vport->phba; 6720 6721 spin_lock_irq(shost->host_lock); 6722 6723 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { 6724 switch(phba->fc_linkspeed) { 6725 case LPFC_LINK_SPEED_1GHZ: 6726 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 6727 break; 6728 case LPFC_LINK_SPEED_2GHZ: 6729 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 6730 break; 6731 case LPFC_LINK_SPEED_4GHZ: 6732 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 6733 break; 6734 case LPFC_LINK_SPEED_8GHZ: 6735 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 6736 break; 6737 case LPFC_LINK_SPEED_10GHZ: 6738 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6739 break; 6740 case LPFC_LINK_SPEED_16GHZ: 6741 fc_host_speed(shost) = FC_PORTSPEED_16GBIT; 6742 break; 6743 case LPFC_LINK_SPEED_32GHZ: 6744 fc_host_speed(shost) = FC_PORTSPEED_32GBIT; 6745 break; 6746 case LPFC_LINK_SPEED_64GHZ: 6747 fc_host_speed(shost) = FC_PORTSPEED_64GBIT; 6748 break; 6749 case LPFC_LINK_SPEED_128GHZ: 6750 fc_host_speed(shost) = FC_PORTSPEED_128GBIT; 6751 break; 6752 case LPFC_LINK_SPEED_256GHZ: 6753 fc_host_speed(shost) = FC_PORTSPEED_256GBIT; 6754 break; 6755 default: 6756 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6757 break; 6758 } 6759 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { 6760 switch (phba->fc_linkspeed) { 6761 case LPFC_ASYNC_LINK_SPEED_1GBPS: 6762 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 6763 break; 6764 case LPFC_ASYNC_LINK_SPEED_10GBPS: 6765 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6766 break; 6767 case LPFC_ASYNC_LINK_SPEED_20GBPS: 6768 fc_host_speed(shost) = FC_PORTSPEED_20GBIT; 6769 break; 6770 case LPFC_ASYNC_LINK_SPEED_25GBPS: 6771 fc_host_speed(shost) = FC_PORTSPEED_25GBIT; 6772 break; 6773 case LPFC_ASYNC_LINK_SPEED_40GBPS: 6774 fc_host_speed(shost) = FC_PORTSPEED_40GBIT; 6775 break; 6776 case LPFC_ASYNC_LINK_SPEED_100GBPS: 6777 fc_host_speed(shost) = FC_PORTSPEED_100GBIT; 6778 break; 6779 default: 6780 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6781 break; 6782 } 6783 } else 6784 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6785 6786 spin_unlock_irq(shost->host_lock); 6787 } 6788 6789 /** 6790 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name 6791 * @shost: kernel scsi host pointer. 6792 **/ 6793 static void 6794 lpfc_get_host_fabric_name (struct Scsi_Host *shost) 6795 { 6796 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6797 struct lpfc_hba *phba = vport->phba; 6798 u64 node_name; 6799 6800 spin_lock_irq(shost->host_lock); 6801 6802 if ((vport->port_state > LPFC_FLOGI) && 6803 ((vport->fc_flag & FC_FABRIC) || 6804 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 6805 (vport->fc_flag & FC_PUBLIC_LOOP)))) 6806 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 6807 else 6808 /* fabric is local port if there is no F/FL_Port */ 6809 node_name = 0; 6810 6811 spin_unlock_irq(shost->host_lock); 6812 6813 fc_host_fabric_name(shost) = node_name; 6814 } 6815 6816 /** 6817 * lpfc_get_stats - Return statistical information about the adapter 6818 * @shost: kernel scsi host pointer. 6819 * 6820 * Notes: 6821 * NULL on error for link down, no mbox pool, sli2 active, 6822 * management not allowed, memory allocation error, or mbox error. 
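 *
 * The counters are gathered with the MBX_READ_STATUS and MBX_READ_LNK_STAT
 * mailbox commands and are reported relative to the offsets captured by the
 * last lpfc_reset_stats() call.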
6823 * 6824 * Returns: 6825 * NULL for error 6826 * address of the adapter host statistics 6827 **/ 6828 static struct fc_host_statistics * 6829 lpfc_get_stats(struct Scsi_Host *shost) 6830 { 6831 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6832 struct lpfc_hba *phba = vport->phba; 6833 struct lpfc_sli *psli = &phba->sli; 6834 struct fc_host_statistics *hs = &phba->link_stats; 6835 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 6836 LPFC_MBOXQ_t *pmboxq; 6837 MAILBOX_t *pmb; 6838 int rc = 0; 6839 6840 /* 6841 * prevent udev from issuing mailbox commands until the port is 6842 * configured. 6843 */ 6844 if (phba->link_state < LPFC_LINK_DOWN || 6845 !phba->mbox_mem_pool || 6846 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 6847 return NULL; 6848 6849 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6850 return NULL; 6851 6852 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6853 if (!pmboxq) 6854 return NULL; 6855 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6856 6857 pmb = &pmboxq->u.mb; 6858 pmb->mbxCommand = MBX_READ_STATUS; 6859 pmb->mbxOwner = OWN_HOST; 6860 pmboxq->ctx_buf = NULL; 6861 pmboxq->vport = vport; 6862 6863 if (vport->fc_flag & FC_OFFLINE_MODE) { 6864 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6865 if (rc != MBX_SUCCESS) { 6866 mempool_free(pmboxq, phba->mbox_mem_pool); 6867 return NULL; 6868 } 6869 } else { 6870 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6871 if (rc != MBX_SUCCESS) { 6872 if (rc != MBX_TIMEOUT) 6873 mempool_free(pmboxq, phba->mbox_mem_pool); 6874 return NULL; 6875 } 6876 } 6877 6878 memset(hs, 0, sizeof (struct fc_host_statistics)); 6879 6880 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; 6881 /* 6882 * The MBX_READ_STATUS returns tx_k_bytes which has to 6883 * converted to words 6884 */ 6885 hs->tx_words = (uint64_t) 6886 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt 6887 * (uint64_t)256); 6888 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; 6889 hs->rx_words = (uint64_t) 6890 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt 6891 * (uint64_t)256); 6892 6893 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6894 pmb->mbxCommand = MBX_READ_LNK_STAT; 6895 pmb->mbxOwner = OWN_HOST; 6896 pmboxq->ctx_buf = NULL; 6897 pmboxq->vport = vport; 6898 6899 if (vport->fc_flag & FC_OFFLINE_MODE) { 6900 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6901 if (rc != MBX_SUCCESS) { 6902 mempool_free(pmboxq, phba->mbox_mem_pool); 6903 return NULL; 6904 } 6905 } else { 6906 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6907 if (rc != MBX_SUCCESS) { 6908 if (rc != MBX_TIMEOUT) 6909 mempool_free(pmboxq, phba->mbox_mem_pool); 6910 return NULL; 6911 } 6912 } 6913 6914 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 6915 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 6916 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 6917 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 6918 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6919 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6920 hs->error_frames = pmb->un.varRdLnk.crcCnt; 6921 6922 hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn); 6923 hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm); 6924 6925 hs->link_failure_count -= lso->link_failure_count; 6926 hs->loss_of_sync_count -= lso->loss_of_sync_count; 6927 hs->loss_of_signal_count -= lso->loss_of_signal_count; 6928 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; 6929 hs->invalid_tx_word_count -= 
lso->invalid_tx_word_count; 6930 hs->invalid_crc_count -= lso->invalid_crc_count; 6931 hs->error_frames -= lso->error_frames; 6932 6933 if (phba->hba_flag & HBA_FCOE_MODE) { 6934 hs->lip_count = -1; 6935 hs->nos_count = (phba->link_events >> 1); 6936 hs->nos_count -= lso->link_events; 6937 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6938 hs->lip_count = (phba->fc_eventTag >> 1); 6939 hs->lip_count -= lso->link_events; 6940 hs->nos_count = -1; 6941 } else { 6942 hs->lip_count = -1; 6943 hs->nos_count = (phba->fc_eventTag >> 1); 6944 hs->nos_count -= lso->link_events; 6945 } 6946 6947 hs->dumped_frames = -1; 6948 6949 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; 6950 6951 mempool_free(pmboxq, phba->mbox_mem_pool); 6952 6953 return hs; 6954 } 6955 6956 /** 6957 * lpfc_reset_stats - Copy the adapter link stats information 6958 * @shost: kernel scsi host pointer. 6959 **/ 6960 static void 6961 lpfc_reset_stats(struct Scsi_Host *shost) 6962 { 6963 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6964 struct lpfc_hba *phba = vport->phba; 6965 struct lpfc_sli *psli = &phba->sli; 6966 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; 6967 LPFC_MBOXQ_t *pmboxq; 6968 MAILBOX_t *pmb; 6969 int rc = 0; 6970 6971 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6972 return; 6973 6974 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6975 if (!pmboxq) 6976 return; 6977 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6978 6979 pmb = &pmboxq->u.mb; 6980 pmb->mbxCommand = MBX_READ_STATUS; 6981 pmb->mbxOwner = OWN_HOST; 6982 pmb->un.varWords[0] = 0x1; /* reset request */ 6983 pmboxq->ctx_buf = NULL; 6984 pmboxq->vport = vport; 6985 6986 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6987 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 6988 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6989 if (rc != MBX_SUCCESS) { 6990 mempool_free(pmboxq, phba->mbox_mem_pool); 6991 return; 6992 } 6993 } else { 6994 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6995 if (rc != MBX_SUCCESS) { 6996 if (rc != MBX_TIMEOUT) 6997 mempool_free(pmboxq, phba->mbox_mem_pool); 6998 return; 6999 } 7000 } 7001 7002 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 7003 pmb->mbxCommand = MBX_READ_LNK_STAT; 7004 pmb->mbxOwner = OWN_HOST; 7005 pmboxq->ctx_buf = NULL; 7006 pmboxq->vport = vport; 7007 7008 if ((vport->fc_flag & FC_OFFLINE_MODE) || 7009 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 7010 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 7011 if (rc != MBX_SUCCESS) { 7012 mempool_free(pmboxq, phba->mbox_mem_pool); 7013 return; 7014 } 7015 } else { 7016 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 7017 if (rc != MBX_SUCCESS) { 7018 if (rc != MBX_TIMEOUT) 7019 mempool_free(pmboxq, phba->mbox_mem_pool); 7020 return; 7021 } 7022 } 7023 7024 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 7025 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 7026 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 7027 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 7028 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 7029 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 7030 lso->error_frames = pmb->un.varRdLnk.crcCnt; 7031 if (phba->hba_flag & HBA_FCOE_MODE) 7032 lso->link_events = (phba->link_events >> 1); 7033 else 7034 lso->link_events = (phba->fc_eventTag >> 1); 7035 7036 atomic64_set(&phba->cgn_acqe_stat.warn, 0); 7037 atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 7038 7039 memset(&shost_to_fc_host(shost)->fpin_stats, 
0, 7040 sizeof(shost_to_fc_host(shost)->fpin_stats)); 7041 7042 psli->stats_start = ktime_get_seconds(); 7043 7044 mempool_free(pmboxq, phba->mbox_mem_pool); 7045 7046 return; 7047 } 7048 7049 /* 7050 * The LPFC driver treats linkdown handling as target loss events so there 7051 * are no sysfs handlers for link_down_tmo. 7052 */ 7053 7054 /** 7055 * lpfc_get_node_by_target - Return the nodelist for a target 7056 * @starget: kernel scsi target pointer. 7057 * 7058 * Returns: 7059 * address of the node list if found 7060 * NULL target not found 7061 **/ 7062 static struct lpfc_nodelist * 7063 lpfc_get_node_by_target(struct scsi_target *starget) 7064 { 7065 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 7066 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 7067 struct lpfc_nodelist *ndlp; 7068 7069 spin_lock_irq(shost->host_lock); 7070 /* Search for this, mapped, target ID */ 7071 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 7072 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 7073 starget->id == ndlp->nlp_sid) { 7074 spin_unlock_irq(shost->host_lock); 7075 return ndlp; 7076 } 7077 } 7078 spin_unlock_irq(shost->host_lock); 7079 return NULL; 7080 } 7081 7082 /** 7083 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 7084 * @starget: kernel scsi target pointer. 7085 **/ 7086 static void 7087 lpfc_get_starget_port_id(struct scsi_target *starget) 7088 { 7089 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 7090 7091 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; 7092 } 7093 7094 /** 7095 * lpfc_get_starget_node_name - Set the target node name 7096 * @starget: kernel scsi target pointer. 7097 * 7098 * Description: Set the target node name to the ndlp node name wwn or zero. 7099 **/ 7100 static void 7101 lpfc_get_starget_node_name(struct scsi_target *starget) 7102 { 7103 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 7104 7105 fc_starget_node_name(starget) = 7106 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; 7107 } 7108 7109 /** 7110 * lpfc_get_starget_port_name - Set the target port name 7111 * @starget: kernel scsi target pointer. 7112 * 7113 * Description: set the target port name to the ndlp port name wwn or zero. 7114 **/ 7115 static void 7116 lpfc_get_starget_port_name(struct scsi_target *starget) 7117 { 7118 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 7119 7120 fc_starget_port_name(starget) = 7121 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; 7122 } 7123 7124 /** 7125 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo 7126 * @rport: fc rport address. 7127 * @timeout: new value for dev loss tmo. 7128 * 7129 * Description: 7130 * If timeout is non zero set the dev_loss_tmo to timeout, else set 7131 * dev_loss_tmo to one. 
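 *
 * When NVME over FC is configured (CONFIG_NVME_FC), the same timeout is
 * also propagated to the node's NVMe remoteport through
 * nvme_fc_set_remoteport_devloss().
 *
 * Illustrative usage only (the rport instance name is hypothetical):
 *     echo 30 > /sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo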

/**
 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
 * @rport: fc rport address.
 * @timeout: new value for dev loss tmo.
 *
 * Description:
 * If timeout is non-zero set the dev_loss_tmo to timeout, else set
 * dev_loss_tmo to one.
 **/
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	struct lpfc_rport_data *rdata = rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport = NULL;
#endif

	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;

	if (!ndlp) {
		dev_info(&rport->dev, "Cannot find remote node to "
			 "set rport dev loss tmo, port_id x%x\n",
			 rport->port_id);
		return;
	}

#if (IS_ENABLED(CONFIG_NVME_FC))
	nrport = lpfc_ndlp_get_nrport(ndlp);

	if (nrport && nrport->remoteport)
		nvme_fc_set_remoteport_devloss(nrport->remoteport,
					       rport->dev_loss_tmo);
#endif
}

/*
 * lpfc_rport_show_function - Return rport target information
 *
 * Description:
 * Macro that uses field to generate a function with the name lpfc_show_rport_
 *
 * lpfc_show_rport_##field: returns the bytes formatted in buf
 * @dev: device converted to an fc_rport.
 * @buf: on return contains the target_field or zero.
 *
 * Returns: size of formatted string.
 **/
#define lpfc_rport_show_function(field, format_string, sz, cast)	\
static ssize_t								\
lpfc_show_rport_##field (struct device *dev,				\
			 struct device_attribute *attr,			\
			 char *buf)					\
{									\
	struct fc_rport *rport = transport_class_to_rport(dev);	\
	struct lpfc_rport_data *rdata = rport->hostdata;		\
	return scnprintf(buf, sz, format_string,			\
		(rdata->target) ? cast rdata->target->field : 0);	\
}

#define lpfc_rport_rd_attr(field, format_string, sz)			\
	lpfc_rport_show_function(field, format_string, sz, )		\
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
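
/*
 * Illustration only; the invocation and field name below are hypothetical,
 * not part of the driver.  Expanding
 *
 *	lpfc_rport_rd_attr(maxframe_size, "%u\n", 20)
 *
 * would generate a read-only show routine plus its rport attribute:
 *
 *	static ssize_t
 *	lpfc_show_rport_maxframe_size(struct device *dev,
 *				      struct device_attribute *attr,
 *				      char *buf)
 *	{
 *		struct fc_rport *rport = transport_class_to_rport(dev);
 *		struct lpfc_rport_data *rdata = rport->hostdata;
 *		return scnprintf(buf, 20, "%u\n",
 *			(rdata->target) ? rdata->target->maxframe_size : 0);
 *	}
 *	static FC_RPORT_ATTR(maxframe_size, S_IRUGO,
 *			     lpfc_show_rport_maxframe_size, NULL)
 */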

/**
 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
 * @fc_vport: The fc_vport whose symbolic name has been changed.
 *
 * Description:
 * This function is called by the transport after the @fc_vport's symbolic name
 * has been changed. This function re-registers the symbolic name with the
 * switch to propagate the change into the fabric if the vport is active.
 **/
static void
lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;

	if (vport->port_state == LPFC_VPORT_READY)
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}

/**
 * lpfc_hba_log_verbose_init - Set hba's log verbose level
 * @phba: Pointer to lpfc_hba struct.
 * @verbose: Verbose level to set.
 *
 * This function is called by the lpfc_get_cfgparam() routine to copy the
 * module parameter lpfc_log_verbose into @phba->cfg_log_verbose, so that
 * log messages can be filtered by verbosity before any hba port or vport
 * is created.
 **/
static void
lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
{
	phba->cfg_log_verbose = verbose;
}

struct fc_function_template lpfc_transport_functions = {
	/* fixed attributes the driver supports */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,

	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
	.show_host_symbolic_name = 1,

	/* dynamic attributes the driver supports */
	.get_host_port_id = lpfc_get_host_port_id,
	.show_host_port_id = 1,

	.get_host_port_type = lpfc_get_host_port_type,
	.show_host_port_type = 1,

	.get_host_port_state = lpfc_get_host_port_state,
	.show_host_port_state = 1,

	/* active_fc4s is shown but doesn't change (thus no get function) */
	.show_host_active_fc4s = 1,

	.get_host_speed = lpfc_get_host_speed,
	.show_host_speed = 1,

	.get_host_fabric_name = lpfc_get_host_fabric_name,
	.show_host_fabric_name = 1,

	/*
	 * The LPFC driver treats linkdown handling as target loss events
	 * so there are no sysfs handlers for link_down_tmo.
	 */

	.get_fc_host_stats = lpfc_get_stats,
	.reset_fc_host_stats = lpfc_reset_stats,

	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_port_id = lpfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.get_starget_node_name = lpfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = lpfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.issue_fc_host_lip = lpfc_issue_lip,
	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
	.terminate_rport_io = lpfc_terminate_rport_io,

	.dd_fcvport_size = sizeof(struct lpfc_vport *),

	.vport_disable = lpfc_vport_disable,

	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,

	.bsg_request = lpfc_bsg_request,
	.bsg_timeout = lpfc_bsg_timeout,
};
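
/*
 * A minimal sketch of how a template like the one above takes effect,
 * following the usual scsi_transport_fc pattern (the exact call site is
 * assumed here, not quoted from this driver's init code): the template is
 * attached once and the result hung off the SCSI host before
 * scsi_add_host():
 *
 *	struct scsi_transport_template *lpfc_transport_template;
 *
 *	lpfc_transport_template =
 *		fc_attach_transport(&lpfc_transport_functions);
 *	if (!lpfc_transport_template)
 *		return -ENOMEM;
 *	shost->transportt = lpfc_transport_template;
 *
 * fc_release_transport() undoes the attach at module unload.
 */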

struct fc_function_template lpfc_vport_transport_functions = {
	/* fixed attributes the driver supports */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,

	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
	.show_host_symbolic_name = 1,

	/* dynamic attributes the driver supports */
	.get_host_port_id = lpfc_get_host_port_id,
	.show_host_port_id = 1,

	.get_host_port_type = lpfc_get_host_port_type,
	.show_host_port_type = 1,

	.get_host_port_state = lpfc_get_host_port_state,
	.show_host_port_state = 1,

	/* active_fc4s is shown but doesn't change (thus no get function) */
	.show_host_active_fc4s = 1,

	.get_host_speed = lpfc_get_host_speed,
	.show_host_speed = 1,

	.get_host_fabric_name = lpfc_get_host_fabric_name,
	.show_host_fabric_name = 1,

	/*
	 * The LPFC driver treats linkdown handling as target loss events
	 * so there are no sysfs handlers for link_down_tmo.
	 */

	.get_fc_host_stats = lpfc_get_stats,
	.reset_fc_host_stats = lpfc_reset_stats,

	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_port_id = lpfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.get_starget_node_name = lpfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = lpfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
	.terminate_rport_io = lpfc_terminate_rport_io,

	.vport_disable = lpfc_vport_disable,

	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};

/**
 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE mode
 * @phba: lpfc_hba pointer.
 **/
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
	/* If the adapter supports FCoE mode */
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
	case PCI_DEVICE_ID_HORNET:
	case PCI_DEVICE_ID_TIGERSHARK:
	case PCI_DEVICE_ID_TOMCAT:
		phba->hba_flag |= HBA_FCOE_MODE;
		break;
	default:
		/* for others, clear the flag */
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}
}
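
/*
 * HBA_FCOE_MODE is derived purely from the PCI device ID here and is then
 * consumed elsewhere in this file: lpfc_get_stats()/lpfc_reset_stats() pick
 * link_events vs. fc_eventTag based on it, and lpfc_get_cfgparam() below
 * uses it to refuse BlockGuard on FCoE functions.
 */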

/**
 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
 * @phba: lpfc_hba pointer.
 **/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
	lpfc_ns_query_init(phba, lpfc_ns_query);
	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
	lpfc_cr_delay_init(phba, lpfc_cr_delay);
	lpfc_cr_count_init(phba, lpfc_cr_count);
	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
	lpfc_ack0_init(phba, lpfc_ack0);
	lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
	lpfc_topology_init(phba, lpfc_topology);
	lpfc_link_speed_init(phba, lpfc_link_speed);
	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
	lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
	lpfc_use_msi_init(phba, lpfc_use_msi);
	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
	lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
	lpfc_force_rscn_init(phba, lpfc_force_rscn);
	lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
	lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);

	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
	/* VMID Inits */
	lpfc_max_vmid_init(phba, lpfc_max_vmid);
	lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
	lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
	lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
	if (phba->sli_rev != LPFC_SLI_REV4)
		phba->cfg_EnableXLane = 0;
	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);

	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
	phba->cfg_oas_lun_state = 0;
	phba->cfg_oas_lun_status = 0;
	phba->cfg_oas_flags = 0;
	phba->cfg_oas_priority = 0;
	lpfc_enable_bg_init(phba, lpfc_enable_bg);
	lpfc_prot_mask_init(phba, lpfc_prot_mask);
	lpfc_prot_guard_init(phba, lpfc_prot_guard);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->cfg_poll = 0;
	else
		phba->cfg_poll = lpfc_poll;

	/* Get the function mode */
	lpfc_get_hba_function_mode(phba);

	/* BlockGuard allowed for FC only. */
	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0581 BlockGuard feature not supported\n");
		/* If set, clear the BlockGuard support param */
		phba->cfg_enable_bg = 0;
	} else if (phba->cfg_enable_bg) {
		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
	}

	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);

	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);

	/* Initialize first burst. Target vs Initiator are different. */
	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
	lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
	lpfc_irq_chann_init(phba, lpfc_irq_chann);
	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
	lpfc_enable_mi_init(phba, lpfc_enable_mi);

	phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
	phba->cmf_active_mode = LPFC_CFG_OFF;
	if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
	    lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
		lpfc_fabric_cgn_frequency = 100; /* 100 ms default */

	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* NVME only supported on SLI4 */
		phba->nvmet_support = 0;
		phba->cfg_nvmet_mrq = 0;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		phba->cfg_enable_bbcr = 0;
		phba->cfg_xri_rebalancing = 0;
	} else {
		/* We MUST have FCP support */
		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
			phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
	}
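
	/*
	 * Note (interpretation of the assignment below, not upstream text):
	 * cfg_auto_imax appears to select the driver's automatic interrupt
	 * coalescing mode; it is enabled only when lpfc_fcp_imax was left at
	 * zero, i.e. an explicitly configured interrupt rate disables the
	 * automatic mode.
	 */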
	phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;

	phba->cfg_enable_pbde = 0;

	/* A value of 0 means use the number of CPUs found in the system */
	if (phba->cfg_hdw_queue == 0)
		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
	if (phba->cfg_irq_chann == 0)
		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
	if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
	    phba->sli_rev == LPFC_SLI_REV4)
		phba->cfg_irq_chann = phba->cfg_hdw_queue;

	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
	lpfc_aer_support_init(phba, lpfc_aer_support);
	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
	lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
	lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
	lpfc_sli_mode_init(phba, lpfc_sli_mode);
	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
	lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);

	return;
}

/**
 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
 * dependencies between protocols and roles.
 * @phba: lpfc_hba pointer.
 **/
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
	int logit = 0;

	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
		logit = 1;
	}
	if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
		logit = 1;
	}
	if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
		phba->cfg_irq_chann = phba->cfg_hdw_queue;
		logit = 1;
	}
	if (logit)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2006 Reducing Queues - CPU limitation: "
				"IRQ %d HDWQ %d\n",
				phba->cfg_irq_chann,
				phba->cfg_hdw_queue);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
	    phba->nvmet_support) {
		phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6013 %s x%x fb_size x%x, fb_max x%x\n",
				"NVME Target PRLI ACC enable_fb ",
				phba->cfg_nvme_enable_fb,
				phba->cfg_nvmet_fb_size,
				LPFC_NVMET_FB_SZ_MAX);

		if (phba->cfg_nvme_enable_fb == 0)
			phba->cfg_nvmet_fb_size = 0;
		else {
			if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
				phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
		}

		if (!phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;

		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
		if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
					"6018 Adjust lpfc_nvmet_mrq to %d\n",
					phba->cfg_nvmet_mrq);
		}
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

	} else {
		/* Not NVME Target mode. Turn off Target parameters. */
		phba->nvmet_support = 0;
		phba->cfg_nvmet_mrq = 0;
		phba->cfg_nvmet_fb_size = 0;
	}
}
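
/*
 * Worked example for the clamping above (values chosen for illustration
 * only): on a 16-CPU system with module parameters lpfc_hdw_queue=32 and
 * lpfc_irq_chann=24, cfg_hdw_queue is reduced to 16, then cfg_irq_chann to
 * 16, and the "2006" message reports "IRQ 16 HDWQ 16"; cfg_irq_chann is
 * never allowed to exceed cfg_hdw_queue.
 */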

/**
 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
 * @vport: lpfc_vport pointer.
 **/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
	lpfc_log_verbose_init(vport, lpfc_log_verbose);
	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
	lpfc_restrict_login_init(vport, lpfc_restrict_login);
	lpfc_fcp_class_init(vport, lpfc_fcp_class);
	lpfc_use_adisc_init(vport, lpfc_use_adisc);
	lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
	lpfc_max_luns_init(vport, lpfc_max_luns);
	lpfc_scan_down_init(vport, lpfc_scan_down);
	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
	return;
}
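
/*
 * Summary of the split between the two routines above: lpfc_get_cfgparam()
 * seeds the per-adapter (phba) settings once at probe_one time, while
 * lpfc_get_vport_cfgparam() re-seeds the per-port (vport) settings each time
 * a port is created, so a newly created NPIV vport presumably starts from
 * the module parameters rather than inheriting another port's sysfs
 * overrides.
 */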