/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_attr.h"

#define LPFC_DEF_DEVLOSS_TMO	30
#define LPFC_MIN_DEVLOSS_TMO	1
#define LPFC_MAX_DEVLOSS_TMO	255

#define LPFC_DEF_MRQ_POST	512
#define LPFC_MIN_MRQ_POST	512
#define LPFC_MAX_MRQ_POST	2048

/*
 * Write key size should be multiple of 4. If write key is changed
 * make sure that library write key is also changed.
 */
#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

const char *const trunk_errmsg[] = {	/* map errcode */
	"",	/* There is no such error code at index 0*/
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};

/**
 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
 * @incr: integer to convert.
 * @hdw: ascii string holding converted integer plus a string terminator.
 *
 * Description:
 * JEDEC Joint Electron Device Engineering Council.
 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
 * character string. The string is then terminated with a NULL in byte 9.
 * Hex 0-9 becomes ascii '0' to '9'.
 * Hex a-f becomes ascii 'a' to 'f'.
 *
 * Notes:
 * Coded for 32 bit integers only.
 **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 + j;
		else
			hdw[7 - i] = 0x61 + j - 10;
		incr = (incr >> 4);
	}
	hdw[8] = 0;
	return;
}

/**
 * lpfc_drvr_version_show - Return the Emulex driver string with version number
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}

/**
 * lpfc_enable_fip_show - Return the fip mode of the HBA
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (phba->hba_flag & HBA_FIP_SUPPORT)
		return scnprintf(buf, PAGE_SIZE, "1\n");
	else
		return scnprintf(buf, PAGE_SIZE, "0\n");
}

static ssize_t
lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nodelist *ndlp;
	struct nvme_fc_remote_port *nrport;
	struct lpfc_fc4_ctrl_stat *cstat;
	uint64_t data1, data2, data3;
	uint64_t totin, totout, tot;
	char *statep;
	int i;
	int len = 0;
	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
		return len;
	}
	if (phba->nvmet_support) {
		if (!phba->targetport) {
			len = scnprintf(buf, PAGE_SIZE,
					"NVME Target: x%llx is not allocated\n",
					wwn_to_u64(vport->fc_portname.u.wwn));
			return len;
		}
		/* Port state is only one of two values for now.
*/ 189 if (phba->targetport->port_id) 190 statep = "REGISTERED"; 191 else 192 statep = "INIT"; 193 scnprintf(tmp, sizeof(tmp), 194 "NVME Target Enabled State %s\n", 195 statep); 196 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 197 goto buffer_done; 198 199 scnprintf(tmp, sizeof(tmp), 200 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", 201 "NVME Target: lpfc", 202 phba->brd_no, 203 wwn_to_u64(vport->fc_portname.u.wwn), 204 wwn_to_u64(vport->fc_nodename.u.wwn), 205 phba->targetport->port_id); 206 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 207 goto buffer_done; 208 209 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) 210 >= PAGE_SIZE) 211 goto buffer_done; 212 213 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 214 scnprintf(tmp, sizeof(tmp), 215 "LS: Rcv %08x Drop %08x Abort %08x\n", 216 atomic_read(&tgtp->rcv_ls_req_in), 217 atomic_read(&tgtp->rcv_ls_req_drop), 218 atomic_read(&tgtp->xmt_ls_abort)); 219 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 220 goto buffer_done; 221 222 if (atomic_read(&tgtp->rcv_ls_req_in) != 223 atomic_read(&tgtp->rcv_ls_req_out)) { 224 scnprintf(tmp, sizeof(tmp), 225 "Rcv LS: in %08x != out %08x\n", 226 atomic_read(&tgtp->rcv_ls_req_in), 227 atomic_read(&tgtp->rcv_ls_req_out)); 228 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 229 goto buffer_done; 230 } 231 232 scnprintf(tmp, sizeof(tmp), 233 "LS: Xmt %08x Drop %08x Cmpl %08x\n", 234 atomic_read(&tgtp->xmt_ls_rsp), 235 atomic_read(&tgtp->xmt_ls_drop), 236 atomic_read(&tgtp->xmt_ls_rsp_cmpl)); 237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 238 goto buffer_done; 239 240 scnprintf(tmp, sizeof(tmp), 241 "LS: RSP Abort %08x xb %08x Err %08x\n", 242 atomic_read(&tgtp->xmt_ls_rsp_aborted), 243 atomic_read(&tgtp->xmt_ls_rsp_xb_set), 244 atomic_read(&tgtp->xmt_ls_rsp_error)); 245 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 246 goto buffer_done; 247 248 scnprintf(tmp, sizeof(tmp), 249 "FCP: Rcv %08x Defer %08x Release %08x " 250 "Drop %08x\n", 251 atomic_read(&tgtp->rcv_fcp_cmd_in), 252 atomic_read(&tgtp->rcv_fcp_cmd_defer), 253 atomic_read(&tgtp->xmt_fcp_release), 254 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 255 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 256 goto buffer_done; 257 258 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 259 atomic_read(&tgtp->rcv_fcp_cmd_out)) { 260 scnprintf(tmp, sizeof(tmp), 261 "Rcv FCP: in %08x != out %08x\n", 262 atomic_read(&tgtp->rcv_fcp_cmd_in), 263 atomic_read(&tgtp->rcv_fcp_cmd_out)); 264 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 265 goto buffer_done; 266 } 267 268 scnprintf(tmp, sizeof(tmp), 269 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " 270 "drop %08x\n", 271 atomic_read(&tgtp->xmt_fcp_read), 272 atomic_read(&tgtp->xmt_fcp_read_rsp), 273 atomic_read(&tgtp->xmt_fcp_write), 274 atomic_read(&tgtp->xmt_fcp_rsp), 275 atomic_read(&tgtp->xmt_fcp_drop)); 276 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 277 goto buffer_done; 278 279 scnprintf(tmp, sizeof(tmp), 280 "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 281 atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 282 atomic_read(&tgtp->xmt_fcp_rsp_error), 283 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 284 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 285 goto buffer_done; 286 287 scnprintf(tmp, sizeof(tmp), 288 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", 289 atomic_read(&tgtp->xmt_fcp_rsp_aborted), 290 atomic_read(&tgtp->xmt_fcp_rsp_xb_set), 291 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); 292 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 293 goto buffer_done; 294 295 scnprintf(tmp, sizeof(tmp), 296 "ABORT: Xmt %08x 
Cmpl %08x\n", 297 atomic_read(&tgtp->xmt_fcp_abort), 298 atomic_read(&tgtp->xmt_fcp_abort_cmpl)); 299 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 300 goto buffer_done; 301 302 scnprintf(tmp, sizeof(tmp), 303 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", 304 atomic_read(&tgtp->xmt_abort_sol), 305 atomic_read(&tgtp->xmt_abort_unsol), 306 atomic_read(&tgtp->xmt_abort_rsp), 307 atomic_read(&tgtp->xmt_abort_rsp_error)); 308 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 309 goto buffer_done; 310 311 scnprintf(tmp, sizeof(tmp), 312 "DELAY: ctx %08x fod %08x wqfull %08x\n", 313 atomic_read(&tgtp->defer_ctx), 314 atomic_read(&tgtp->defer_fod), 315 atomic_read(&tgtp->defer_wqfull)); 316 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 317 goto buffer_done; 318 319 /* Calculate outstanding IOs */ 320 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); 321 tot += atomic_read(&tgtp->xmt_fcp_release); 322 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 323 324 scnprintf(tmp, sizeof(tmp), 325 "IO_CTX: %08x WAIT: cur %08x tot %08x\n" 326 "CTX Outstanding %08llx\n\n", 327 phba->sli4_hba.nvmet_xri_cnt, 328 phba->sli4_hba.nvmet_io_wait_cnt, 329 phba->sli4_hba.nvmet_io_wait_total, 330 tot); 331 strlcat(buf, tmp, PAGE_SIZE); 332 goto buffer_done; 333 } 334 335 localport = vport->localport; 336 if (!localport) { 337 len = scnprintf(buf, PAGE_SIZE, 338 "NVME Initiator x%llx is not allocated\n", 339 wwn_to_u64(vport->fc_portname.u.wwn)); 340 return len; 341 } 342 lport = (struct lpfc_nvme_lport *)localport->private; 343 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) 344 goto buffer_done; 345 346 scnprintf(tmp, sizeof(tmp), 347 "XRI Dist lpfc%d Total %d IO %d ELS %d\n", 348 phba->brd_no, 349 phba->sli4_hba.max_cfg_param.max_xri, 350 phba->sli4_hba.io_xri_max, 351 lpfc_sli4_get_els_iocb_cnt(phba)); 352 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 353 goto buffer_done; 354 355 /* Port state is only one of two values for now. */ 356 if (localport->port_id) 357 statep = "ONLINE"; 358 else 359 statep = "UNKNOWN "; 360 361 scnprintf(tmp, sizeof(tmp), 362 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", 363 "NVME LPORT lpfc", 364 phba->brd_no, 365 wwn_to_u64(vport->fc_portname.u.wwn), 366 wwn_to_u64(vport->fc_nodename.u.wwn), 367 localport->port_id, statep); 368 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 369 goto buffer_done; 370 371 spin_lock_irq(shost->host_lock); 372 373 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 374 nrport = NULL; 375 spin_lock(&vport->phba->hbalock); 376 rport = lpfc_ndlp_get_nrport(ndlp); 377 if (rport) 378 nrport = rport->remoteport; 379 spin_unlock(&vport->phba->hbalock); 380 if (!nrport) 381 continue; 382 383 /* Port state is only one of two values for now. */ 384 switch (nrport->port_state) { 385 case FC_OBJSTATE_ONLINE: 386 statep = "ONLINE"; 387 break; 388 case FC_OBJSTATE_UNKNOWN: 389 statep = "UNKNOWN "; 390 break; 391 default: 392 statep = "UNSUPPORTED"; 393 break; 394 } 395 396 /* Tab in to show lport ownership. 
*/ 397 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) 398 goto unlock_buf_done; 399 if (phba->brd_no >= 10) { 400 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) 401 goto unlock_buf_done; 402 } 403 404 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", 405 nrport->port_name); 406 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 407 goto unlock_buf_done; 408 409 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", 410 nrport->node_name); 411 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 412 goto unlock_buf_done; 413 414 scnprintf(tmp, sizeof(tmp), "DID x%06x ", 415 nrport->port_id); 416 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 417 goto unlock_buf_done; 418 419 /* An NVME rport can have multiple roles. */ 420 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { 421 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) 422 goto unlock_buf_done; 423 } 424 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { 425 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) 426 goto unlock_buf_done; 427 } 428 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { 429 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) 430 goto unlock_buf_done; 431 } 432 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | 433 FC_PORT_ROLE_NVME_TARGET | 434 FC_PORT_ROLE_NVME_DISCOVERY)) { 435 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", 436 nrport->port_role); 437 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 438 goto unlock_buf_done; 439 } 440 441 scnprintf(tmp, sizeof(tmp), "%s\n", statep); 442 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 443 goto unlock_buf_done; 444 } 445 spin_unlock_irq(shost->host_lock); 446 447 if (!lport) 448 goto buffer_done; 449 450 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) 451 goto buffer_done; 452 453 scnprintf(tmp, sizeof(tmp), 454 "LS: Xmt %010x Cmpl %010x Abort %08x\n", 455 atomic_read(&lport->fc4NvmeLsRequests), 456 atomic_read(&lport->fc4NvmeLsCmpls), 457 atomic_read(&lport->xmt_ls_abort)); 458 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 459 goto buffer_done; 460 461 scnprintf(tmp, sizeof(tmp), 462 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", 463 atomic_read(&lport->xmt_ls_err), 464 atomic_read(&lport->cmpl_ls_xb), 465 atomic_read(&lport->cmpl_ls_err)); 466 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 467 goto buffer_done; 468 469 totin = 0; 470 totout = 0; 471 for (i = 0; i < phba->cfg_hdw_queue; i++) { 472 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; 473 tot = cstat->io_cmpls; 474 totin += tot; 475 data1 = cstat->input_requests; 476 data2 = cstat->output_requests; 477 data3 = cstat->control_requests; 478 totout += (data1 + data2 + data3); 479 } 480 scnprintf(tmp, sizeof(tmp), 481 "Total FCP Cmpl %016llx Issue %016llx " 482 "OutIO %016llx\n", 483 totin, totout, totout - totin); 484 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 485 goto buffer_done; 486 487 scnprintf(tmp, sizeof(tmp), 488 "\tabort %08x noxri %08x nondlp %08x qdepth %08x " 489 "wqerr %08x err %08x\n", 490 atomic_read(&lport->xmt_fcp_abort), 491 atomic_read(&lport->xmt_fcp_noxri), 492 atomic_read(&lport->xmt_fcp_bad_ndlp), 493 atomic_read(&lport->xmt_fcp_qdepth), 494 atomic_read(&lport->xmt_fcp_err), 495 atomic_read(&lport->xmt_fcp_wqerr)); 496 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 497 goto buffer_done; 498 499 scnprintf(tmp, sizeof(tmp), 500 "FCP CMPL: xb %08x Err %08x\n", 501 atomic_read(&lport->cmpl_fcp_xb), 502 atomic_read(&lport->cmpl_fcp_err)); 503 strlcat(buf, tmp, PAGE_SIZE); 504 505 /* host_lock is already unlocked. 
*/ 506 goto buffer_done; 507 508 unlock_buf_done: 509 spin_unlock_irq(shost->host_lock); 510 511 buffer_done: 512 len = strnlen(buf, PAGE_SIZE); 513 514 if (unlikely(len >= (PAGE_SIZE - 1))) { 515 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 516 "6314 Catching potential buffer " 517 "overflow > PAGE_SIZE = %lu bytes\n", 518 PAGE_SIZE); 519 strlcpy(buf + PAGE_SIZE - 1 - 520 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1), 521 LPFC_NVME_INFO_MORE_STR, 522 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1) 523 + 1); 524 } 525 526 return len; 527 } 528 529 static ssize_t 530 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, 531 char *buf) 532 { 533 struct Scsi_Host *shost = class_to_shost(dev); 534 struct lpfc_vport *vport = shost_priv(shost); 535 struct lpfc_hba *phba = vport->phba; 536 int len; 537 struct lpfc_fc4_ctrl_stat *cstat; 538 u64 data1, data2, data3; 539 u64 tot, totin, totout; 540 int i; 541 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; 542 543 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || 544 (phba->sli_rev != LPFC_SLI_REV4)) 545 return 0; 546 547 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); 548 549 totin = 0; 550 totout = 0; 551 for (i = 0; i < phba->cfg_hdw_queue; i++) { 552 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; 553 tot = cstat->io_cmpls; 554 totin += tot; 555 data1 = cstat->input_requests; 556 data2 = cstat->output_requests; 557 data3 = cstat->control_requests; 558 totout += (data1 + data2 + data3); 559 560 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " 561 "IO %016llx ", i, data1, data2, data3); 562 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 563 goto buffer_done; 564 565 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", 566 tot, ((data1 + data2 + data3) - tot)); 567 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) 568 goto buffer_done; 569 } 570 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " 571 "OutIO %016llx\n", totin, totout, totout - totin); 572 strlcat(buf, tmp, PAGE_SIZE); 573 574 buffer_done: 575 len = strnlen(buf, PAGE_SIZE); 576 577 return len; 578 } 579 580 static ssize_t 581 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, 582 char *buf) 583 { 584 struct Scsi_Host *shost = class_to_shost(dev); 585 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 586 struct lpfc_hba *phba = vport->phba; 587 588 if (phba->cfg_enable_bg) { 589 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 590 return scnprintf(buf, PAGE_SIZE, 591 "BlockGuard Enabled\n"); 592 else 593 return scnprintf(buf, PAGE_SIZE, 594 "BlockGuard Not Supported\n"); 595 } else 596 return scnprintf(buf, PAGE_SIZE, 597 "BlockGuard Disabled\n"); 598 } 599 600 static ssize_t 601 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, 602 char *buf) 603 { 604 struct Scsi_Host *shost = class_to_shost(dev); 605 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 606 struct lpfc_hba *phba = vport->phba; 607 608 return scnprintf(buf, PAGE_SIZE, "%llu\n", 609 (unsigned long long)phba->bg_guard_err_cnt); 610 } 611 612 static ssize_t 613 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, 614 char *buf) 615 { 616 struct Scsi_Host *shost = class_to_shost(dev); 617 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 618 struct lpfc_hba *phba = vport->phba; 619 620 return scnprintf(buf, PAGE_SIZE, "%llu\n", 621 (unsigned long long)phba->bg_apptag_err_cnt); 622 } 623 624 static ssize_t 625 lpfc_bg_reftag_err_show(struct device *dev, struct 
device_attribute *attr, 626 char *buf) 627 { 628 struct Scsi_Host *shost = class_to_shost(dev); 629 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 630 struct lpfc_hba *phba = vport->phba; 631 632 return scnprintf(buf, PAGE_SIZE, "%llu\n", 633 (unsigned long long)phba->bg_reftag_err_cnt); 634 } 635 636 /** 637 * lpfc_info_show - Return some pci info about the host in ascii 638 * @dev: class converted to a Scsi_host structure. 639 * @attr: device attribute, not used. 640 * @buf: on return contains the formatted text from lpfc_info(). 641 * 642 * Returns: size of formatted string. 643 **/ 644 static ssize_t 645 lpfc_info_show(struct device *dev, struct device_attribute *attr, 646 char *buf) 647 { 648 struct Scsi_Host *host = class_to_shost(dev); 649 650 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); 651 } 652 653 /** 654 * lpfc_serialnum_show - Return the hba serial number in ascii 655 * @dev: class converted to a Scsi_host structure. 656 * @attr: device attribute, not used. 657 * @buf: on return contains the formatted text serial number. 658 * 659 * Returns: size of formatted string. 660 **/ 661 static ssize_t 662 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, 663 char *buf) 664 { 665 struct Scsi_Host *shost = class_to_shost(dev); 666 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 667 struct lpfc_hba *phba = vport->phba; 668 669 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); 670 } 671 672 /** 673 * lpfc_temp_sensor_show - Return the temperature sensor level 674 * @dev: class converted to a Scsi_host structure. 675 * @attr: device attribute, not used. 676 * @buf: on return contains the formatted support level. 677 * 678 * Description: 679 * Returns a number indicating the temperature sensor level currently 680 * supported, zero or one in ascii. 681 * 682 * Returns: size of formatted string. 683 **/ 684 static ssize_t 685 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, 686 char *buf) 687 { 688 struct Scsi_Host *shost = class_to_shost(dev); 689 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 690 struct lpfc_hba *phba = vport->phba; 691 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); 692 } 693 694 /** 695 * lpfc_modeldesc_show - Return the model description of the hba 696 * @dev: class converted to a Scsi_host structure. 697 * @attr: device attribute, not used. 698 * @buf: on return contains the scsi vpd model description. 699 * 700 * Returns: size of formatted string. 701 **/ 702 static ssize_t 703 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, 704 char *buf) 705 { 706 struct Scsi_Host *shost = class_to_shost(dev); 707 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 708 struct lpfc_hba *phba = vport->phba; 709 710 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); 711 } 712 713 /** 714 * lpfc_modelname_show - Return the model name of the hba 715 * @dev: class converted to a Scsi_host structure. 716 * @attr: device attribute, not used. 717 * @buf: on return contains the scsi vpd model name. 718 * 719 * Returns: size of formatted string. 
720 **/ 721 static ssize_t 722 lpfc_modelname_show(struct device *dev, struct device_attribute *attr, 723 char *buf) 724 { 725 struct Scsi_Host *shost = class_to_shost(dev); 726 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 727 struct lpfc_hba *phba = vport->phba; 728 729 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); 730 } 731 732 /** 733 * lpfc_programtype_show - Return the program type of the hba 734 * @dev: class converted to a Scsi_host structure. 735 * @attr: device attribute, not used. 736 * @buf: on return contains the scsi vpd program type. 737 * 738 * Returns: size of formatted string. 739 **/ 740 static ssize_t 741 lpfc_programtype_show(struct device *dev, struct device_attribute *attr, 742 char *buf) 743 { 744 struct Scsi_Host *shost = class_to_shost(dev); 745 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 746 struct lpfc_hba *phba = vport->phba; 747 748 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); 749 } 750 751 /** 752 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag 753 * @dev: class converted to a Scsi_host structure. 754 * @attr: device attribute, not used. 755 * @buf: on return contains the Menlo Maintenance sli flag. 756 * 757 * Returns: size of formatted string. 758 **/ 759 static ssize_t 760 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) 761 { 762 struct Scsi_Host *shost = class_to_shost(dev); 763 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 764 struct lpfc_hba *phba = vport->phba; 765 766 return scnprintf(buf, PAGE_SIZE, "%d\n", 767 (phba->sli.sli_flag & LPFC_MENLO_MAINT)); 768 } 769 770 /** 771 * lpfc_vportnum_show - Return the port number in ascii of the hba 772 * @dev: class converted to a Scsi_host structure. 773 * @attr: device attribute, not used. 774 * @buf: on return contains scsi vpd program type. 775 * 776 * Returns: size of formatted string. 777 **/ 778 static ssize_t 779 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, 780 char *buf) 781 { 782 struct Scsi_Host *shost = class_to_shost(dev); 783 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 784 struct lpfc_hba *phba = vport->phba; 785 786 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); 787 } 788 789 /** 790 * lpfc_fwrev_show - Return the firmware rev running in the hba 791 * @dev: class converted to a Scsi_host structure. 792 * @attr: device attribute, not used. 793 * @buf: on return contains the scsi vpd program type. 794 * 795 * Returns: size of formatted string. 796 **/ 797 static ssize_t 798 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, 799 char *buf) 800 { 801 struct Scsi_Host *shost = class_to_shost(dev); 802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 803 struct lpfc_hba *phba = vport->phba; 804 uint32_t if_type; 805 uint8_t sli_family; 806 char fwrev[FW_REV_STR_SIZE]; 807 int len; 808 809 lpfc_decode_firmware_rev(phba, fwrev, 1); 810 if_type = phba->sli4_hba.pc_sli4_params.if_type; 811 sli_family = phba->sli4_hba.pc_sli4_params.sli_family; 812 813 if (phba->sli_rev < LPFC_SLI_REV4) 814 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", 815 fwrev, phba->sli_rev); 816 else 817 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", 818 fwrev, phba->sli_rev, if_type, sli_family); 819 820 return len; 821 } 822 823 /** 824 * lpfc_hdw_show - Return the jedec information about the hba 825 * @dev: class converted to a Scsi_host structure. 826 * @attr: device attribute, not used. 
827 * @buf: on return contains the scsi vpd program type. 828 * 829 * Returns: size of formatted string. 830 **/ 831 static ssize_t 832 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) 833 { 834 char hdw[9]; 835 struct Scsi_Host *shost = class_to_shost(dev); 836 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 837 struct lpfc_hba *phba = vport->phba; 838 lpfc_vpd_t *vp = &phba->vpd; 839 840 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 841 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, 842 vp->rev.smRev, vp->rev.smFwRev); 843 } 844 845 /** 846 * lpfc_option_rom_version_show - Return the adapter ROM FCode version 847 * @dev: class converted to a Scsi_host structure. 848 * @attr: device attribute, not used. 849 * @buf: on return contains the ROM and FCode ascii strings. 850 * 851 * Returns: size of formatted string. 852 **/ 853 static ssize_t 854 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, 855 char *buf) 856 { 857 struct Scsi_Host *shost = class_to_shost(dev); 858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 859 struct lpfc_hba *phba = vport->phba; 860 char fwrev[FW_REV_STR_SIZE]; 861 862 if (phba->sli_rev < LPFC_SLI_REV4) 863 return scnprintf(buf, PAGE_SIZE, "%s\n", 864 phba->OptionROMVersion); 865 866 lpfc_decode_firmware_rev(phba, fwrev, 1); 867 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); 868 } 869 870 /** 871 * lpfc_state_show - Return the link state of the port 872 * @dev: class converted to a Scsi_host structure. 873 * @attr: device attribute, not used. 874 * @buf: on return contains text describing the state of the link. 875 * 876 * Notes: 877 * The switch statement has no default so zero will be returned. 878 * 879 * Returns: size of formatted string. 
880 **/ 881 static ssize_t 882 lpfc_link_state_show(struct device *dev, struct device_attribute *attr, 883 char *buf) 884 { 885 struct Scsi_Host *shost = class_to_shost(dev); 886 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 887 struct lpfc_hba *phba = vport->phba; 888 int len = 0; 889 890 switch (phba->link_state) { 891 case LPFC_LINK_UNKNOWN: 892 case LPFC_WARM_START: 893 case LPFC_INIT_START: 894 case LPFC_INIT_MBX_CMDS: 895 case LPFC_LINK_DOWN: 896 case LPFC_HBA_ERROR: 897 if (phba->hba_flag & LINK_DISABLED) 898 len += scnprintf(buf + len, PAGE_SIZE-len, 899 "Link Down - User disabled\n"); 900 else 901 len += scnprintf(buf + len, PAGE_SIZE-len, 902 "Link Down\n"); 903 break; 904 case LPFC_LINK_UP: 905 case LPFC_CLEAR_LA: 906 case LPFC_HBA_READY: 907 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); 908 909 switch (vport->port_state) { 910 case LPFC_LOCAL_CFG_LINK: 911 len += scnprintf(buf + len, PAGE_SIZE-len, 912 "Configuring Link\n"); 913 break; 914 case LPFC_FDISC: 915 case LPFC_FLOGI: 916 case LPFC_FABRIC_CFG_LINK: 917 case LPFC_NS_REG: 918 case LPFC_NS_QRY: 919 case LPFC_BUILD_DISC_LIST: 920 case LPFC_DISC_AUTH: 921 len += scnprintf(buf + len, PAGE_SIZE - len, 922 "Discovery\n"); 923 break; 924 case LPFC_VPORT_READY: 925 len += scnprintf(buf + len, PAGE_SIZE - len, 926 "Ready\n"); 927 break; 928 929 case LPFC_VPORT_FAILED: 930 len += scnprintf(buf + len, PAGE_SIZE - len, 931 "Failed\n"); 932 break; 933 934 case LPFC_VPORT_UNKNOWN: 935 len += scnprintf(buf + len, PAGE_SIZE - len, 936 "Unknown\n"); 937 break; 938 } 939 if (phba->sli.sli_flag & LPFC_MENLO_MAINT) 940 len += scnprintf(buf + len, PAGE_SIZE-len, 941 " Menlo Maint Mode\n"); 942 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 943 if (vport->fc_flag & FC_PUBLIC_LOOP) 944 len += scnprintf(buf + len, PAGE_SIZE-len, 945 " Public Loop\n"); 946 else 947 len += scnprintf(buf + len, PAGE_SIZE-len, 948 " Private Loop\n"); 949 } else { 950 if (vport->fc_flag & FC_FABRIC) 951 len += scnprintf(buf + len, PAGE_SIZE-len, 952 " Fabric\n"); 953 else 954 len += scnprintf(buf + len, PAGE_SIZE-len, 955 " Point-2-Point\n"); 956 } 957 } 958 959 if ((phba->sli_rev == LPFC_SLI_REV4) && 960 ((bf_get(lpfc_sli_intf_if_type, 961 &phba->sli4_hba.sli_intf) == 962 LPFC_SLI_INTF_IF_TYPE_6))) { 963 struct lpfc_trunk_link link = phba->trunk_link; 964 965 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 966 len += scnprintf(buf + len, PAGE_SIZE - len, 967 "Trunk port 0: Link %s %s\n", 968 (link.link0.state == LPFC_LINK_UP) ? 969 "Up" : "Down. ", 970 trunk_errmsg[link.link0.fault]); 971 972 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 973 len += scnprintf(buf + len, PAGE_SIZE - len, 974 "Trunk port 1: Link %s %s\n", 975 (link.link1.state == LPFC_LINK_UP) ? 976 "Up" : "Down. ", 977 trunk_errmsg[link.link1.fault]); 978 979 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 980 len += scnprintf(buf + len, PAGE_SIZE - len, 981 "Trunk port 2: Link %s %s\n", 982 (link.link2.state == LPFC_LINK_UP) ? 983 "Up" : "Down. ", 984 trunk_errmsg[link.link2.fault]); 985 986 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 987 len += scnprintf(buf + len, PAGE_SIZE - len, 988 "Trunk port 3: Link %s %s\n", 989 (link.link3.state == LPFC_LINK_UP) ? 990 "Up" : "Down. ", 991 trunk_errmsg[link.link3.fault]); 992 993 } 994 995 return len; 996 } 997 998 /** 999 * lpfc_sli4_protocol_show - Return the fip mode of the HBA 1000 * @dev: class unused variable. 1001 * @attr: device attribute, not used. 
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return scnprintf(buf, PAGE_SIZE, "fc\n");

	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
			return scnprintf(buf, PAGE_SIZE, "fc\n");
	}
	return scnprintf(buf, PAGE_SIZE, "unknown\n");
}

/**
 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
 *			     (OAS) is supported.
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 phba->sli4_hba.pc_sli4_params.oas_supported);
}

/**
 * lpfc_link_state_store - Transition the link_state on an HBA port
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string "up" or "down".
 * @count: not used.
 *
 * Returns:
 * -EINVAL if the buffer is not "up" or "down"
 * return from link state change function if non-zero
 * length of the buf on success
 **/
static ssize_t
lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	int status = -EINVAL;

	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
	    (phba->link_state == LPFC_LINK_DOWN))
		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
		 (phba->link_state >= LPFC_LINK_UP))
		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);

	if (status == 0)
		return strlen(buf);
	else
		return status;
}

/**
 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the sum of fc mapped and unmapped.
 *
 * Description:
 * Returns the ascii text number of the sum of the fc mapped and unmapped
 * vport counts.
 *
 * Returns: size of formatted string.
1093 **/ 1094 static ssize_t 1095 lpfc_num_discovered_ports_show(struct device *dev, 1096 struct device_attribute *attr, char *buf) 1097 { 1098 struct Scsi_Host *shost = class_to_shost(dev); 1099 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1100 1101 return scnprintf(buf, PAGE_SIZE, "%d\n", 1102 vport->fc_map_cnt + vport->fc_unmap_cnt); 1103 } 1104 1105 /** 1106 * lpfc_issue_lip - Misnomer, name carried over from long ago 1107 * @shost: Scsi_Host pointer. 1108 * 1109 * Description: 1110 * Bring the link down gracefully then re-init the link. The firmware will 1111 * re-init the fiber channel interface as required. Does not issue a LIP. 1112 * 1113 * Returns: 1114 * -EPERM port offline or management commands are being blocked 1115 * -ENOMEM cannot allocate memory for the mailbox command 1116 * -EIO error sending the mailbox command 1117 * zero for success 1118 **/ 1119 static int 1120 lpfc_issue_lip(struct Scsi_Host *shost) 1121 { 1122 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1123 struct lpfc_hba *phba = vport->phba; 1124 LPFC_MBOXQ_t *pmboxq; 1125 int mbxstatus = MBXERR_ERROR; 1126 1127 /* 1128 * If the link is offline, disabled or BLOCK_MGMT_IO 1129 * it doesn't make any sense to allow issue_lip 1130 */ 1131 if ((vport->fc_flag & FC_OFFLINE_MODE) || 1132 (phba->hba_flag & LINK_DISABLED) || 1133 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) 1134 return -EPERM; 1135 1136 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 1137 1138 if (!pmboxq) 1139 return -ENOMEM; 1140 1141 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 1142 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1143 pmboxq->u.mb.mbxOwner = OWN_HOST; 1144 1145 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 1146 1147 if ((mbxstatus == MBX_SUCCESS) && 1148 (pmboxq->u.mb.mbxStatus == 0 || 1149 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { 1150 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 1151 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 1152 phba->cfg_link_speed); 1153 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 1154 phba->fc_ratov * 2); 1155 if ((mbxstatus == MBX_SUCCESS) && 1156 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 1157 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1158 "2859 SLI authentication is required " 1159 "for INIT_LINK but has not done yet\n"); 1160 } 1161 1162 lpfc_set_loopback_flag(phba); 1163 if (mbxstatus != MBX_TIMEOUT) 1164 mempool_free(pmboxq, phba->mbox_mem_pool); 1165 1166 if (mbxstatus == MBXERR_ERROR) 1167 return -EIO; 1168 1169 return 0; 1170 } 1171 1172 int 1173 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) 1174 { 1175 int cnt = 0; 1176 1177 spin_lock_irq(lock); 1178 while (!list_empty(q)) { 1179 spin_unlock_irq(lock); 1180 msleep(20); 1181 if (cnt++ > 250) { /* 5 secs */ 1182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1183 "0466 %s %s\n", 1184 "Outstanding IO when ", 1185 "bringing Adapter offline\n"); 1186 return 0; 1187 } 1188 spin_lock_irq(lock); 1189 } 1190 spin_unlock_irq(lock); 1191 return 1; 1192 } 1193 1194 /** 1195 * lpfc_do_offline - Issues a mailbox command to bring the link down 1196 * @phba: lpfc_hba pointer. 1197 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. 1198 * 1199 * Notes: 1200 * Assumes any error from lpfc_do_offline() will be negative. 1201 * Can wait up to 5 seconds for the port ring buffers count 1202 * to reach zero, prints a warning if it is not zero and continues. 
1203 * lpfc_workq_post_event() returns a non-zero return code if call fails. 1204 * 1205 * Returns: 1206 * -EIO error posting the event 1207 * zero for success 1208 **/ 1209 static int 1210 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) 1211 { 1212 struct completion online_compl; 1213 struct lpfc_queue *qp = NULL; 1214 struct lpfc_sli_ring *pring; 1215 struct lpfc_sli *psli; 1216 int status = 0; 1217 int i; 1218 int rc; 1219 1220 init_completion(&online_compl); 1221 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1222 LPFC_EVT_OFFLINE_PREP); 1223 if (rc == 0) 1224 return -ENOMEM; 1225 1226 wait_for_completion(&online_compl); 1227 1228 if (status != 0) 1229 return -EIO; 1230 1231 psli = &phba->sli; 1232 1233 /* 1234 * If freeing the queues have already started, don't access them. 1235 * Otherwise set FREE_WAIT to indicate that queues are being used 1236 * to hold the freeing process until we finish. 1237 */ 1238 spin_lock_irq(&phba->hbalock); 1239 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) { 1240 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT; 1241 } else { 1242 spin_unlock_irq(&phba->hbalock); 1243 goto skip_wait; 1244 } 1245 spin_unlock_irq(&phba->hbalock); 1246 1247 /* Wait a little for things to settle down, but not 1248 * long enough for dev loss timeout to expire. 1249 */ 1250 if (phba->sli_rev != LPFC_SLI_REV4) { 1251 for (i = 0; i < psli->num_rings; i++) { 1252 pring = &psli->sli3_ring[i]; 1253 if (!lpfc_emptyq_wait(phba, &pring->txcmplq, 1254 &phba->hbalock)) 1255 goto out; 1256 } 1257 } else { 1258 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1259 pring = qp->pring; 1260 if (!pring) 1261 continue; 1262 if (!lpfc_emptyq_wait(phba, &pring->txcmplq, 1263 &pring->ring_lock)) 1264 goto out; 1265 } 1266 } 1267 out: 1268 spin_lock_irq(&phba->hbalock); 1269 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT; 1270 spin_unlock_irq(&phba->hbalock); 1271 1272 skip_wait: 1273 init_completion(&online_compl); 1274 rc = lpfc_workq_post_event(phba, &status, &online_compl, type); 1275 if (rc == 0) 1276 return -ENOMEM; 1277 1278 wait_for_completion(&online_compl); 1279 1280 if (status != 0) 1281 return -EIO; 1282 1283 return 0; 1284 } 1285 1286 /** 1287 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA 1288 * @phba: lpfc_hba pointer. 1289 * 1290 * Description: 1291 * Issues a PCI secondary bus reset for the phba->pcidev. 1292 * 1293 * Notes: 1294 * First walks the bus_list to ensure only PCI devices with Emulex 1295 * vendor id, device ids that support hot reset, only one occurrence 1296 * of function 0, and all ports on the bus are in offline mode to ensure the 1297 * hot reset only affects one valid HBA. 
1298 * 1299 * Returns: 1300 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2 1301 * -ENODEV, NULL ptr to pcidev 1302 * -EBADSLT, detected invalid device 1303 * -EBUSY, port is not in offline state 1304 * 0, successful 1305 */ 1306 static int 1307 lpfc_reset_pci_bus(struct lpfc_hba *phba) 1308 { 1309 struct pci_dev *pdev = phba->pcidev; 1310 struct Scsi_Host *shost = NULL; 1311 struct lpfc_hba *phba_other = NULL; 1312 struct pci_dev *ptr = NULL; 1313 int res; 1314 1315 if (phba->cfg_enable_hba_reset != 2) 1316 return -ENOTSUPP; 1317 1318 if (!pdev) { 1319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n"); 1320 return -ENODEV; 1321 } 1322 1323 res = lpfc_check_pci_resettable(phba); 1324 if (res) 1325 return res; 1326 1327 /* Walk the list of devices on the pci_dev's bus */ 1328 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 1329 /* Check port is offline */ 1330 shost = pci_get_drvdata(ptr); 1331 if (shost) { 1332 phba_other = 1333 ((struct lpfc_vport *)shost->hostdata)->phba; 1334 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) { 1335 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT, 1336 "8349 WWPN = 0x%02x%02x%02x%02x" 1337 "%02x%02x%02x%02x is not " 1338 "offline!\n", 1339 phba_other->wwpn[0], 1340 phba_other->wwpn[1], 1341 phba_other->wwpn[2], 1342 phba_other->wwpn[3], 1343 phba_other->wwpn[4], 1344 phba_other->wwpn[5], 1345 phba_other->wwpn[6], 1346 phba_other->wwpn[7]); 1347 return -EBUSY; 1348 } 1349 } 1350 } 1351 1352 /* Issue PCI bus reset */ 1353 res = pci_reset_bus(pdev); 1354 if (res) { 1355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1356 "8350 PCI reset bus failed: %d\n", res); 1357 } 1358 1359 return res; 1360 } 1361 1362 /** 1363 * lpfc_selective_reset - Offline then onlines the port 1364 * @phba: lpfc_hba pointer. 1365 * 1366 * Description: 1367 * If the port is configured to allow a reset then the hba is brought 1368 * offline then online. 1369 * 1370 * Notes: 1371 * Assumes any error from lpfc_do_offline() will be negative. 1372 * Do not make this function static. 1373 * 1374 * Returns: 1375 * lpfc_do_offline() return code if not zero 1376 * -EIO reset not configured or error posting the event 1377 * zero for success 1378 **/ 1379 int 1380 lpfc_selective_reset(struct lpfc_hba *phba) 1381 { 1382 struct completion online_compl; 1383 int status = 0; 1384 int rc; 1385 1386 if (!phba->cfg_enable_hba_reset) 1387 return -EACCES; 1388 1389 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { 1390 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1391 1392 if (status != 0) 1393 return status; 1394 } 1395 1396 init_completion(&online_compl); 1397 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1398 LPFC_EVT_ONLINE); 1399 if (rc == 0) 1400 return -ENOMEM; 1401 1402 wait_for_completion(&online_compl); 1403 1404 if (status != 0) 1405 return -EIO; 1406 1407 return 0; 1408 } 1409 1410 /** 1411 * lpfc_issue_reset - Selectively resets an adapter 1412 * @dev: class device that is converted into a Scsi_host. 1413 * @attr: device attribute, not used. 1414 * @buf: containing the string "selective". 1415 * @count: unused variable. 1416 * 1417 * Description: 1418 * If the buf contains the string "selective" then lpfc_selective_reset() 1419 * is called to perform the reset. 1420 * 1421 * Notes: 1422 * Assumes any error from lpfc_selective_reset() will be negative. 
 * If lpfc_selective_reset() returns zero then the length of the buffer
 * is returned which indicates success.
 *
 * Returns:
 * -EINVAL if the buffer does not contain the string "selective"
 * length of buf if lpfc_selective_reset() succeeds
 * return value of lpfc_selective_reset() if the call fails
 **/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int status = -EINVAL;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
		status = phba->lpfc_selective_reset(phba);

	if (status == 0)
		return strlen(buf);
	else
		return status;
}

/**
 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readiness
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * SLI4 interface type-2 device to wait on the sliport status register for
 * the readiness after performing a firmware reset.
 *
 * Returns:
 * zero for success, -EPERM when port does not have privilege to perform the
 * reset, -EIO when port timeout from recovering from the reset.
 *
 * Note:
 * As the caller will interpret the return code by value, be careful in making
 * change or addition to return codes.
 **/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
{
	struct lpfc_register portstat_reg = {0};
	int i;

	msleep(100);
	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
		       &portstat_reg.word0))
		return -EIO;

	/* verify if privileged for the request operation */
	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
		return -EPERM;

	/* wait for the SLI port firmware ready after firmware reset */
	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
		msleep(10);
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0))
			continue;
		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
			continue;
		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
			continue;
		break;
	}

	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
		return 0;
	else
		return -EIO;
}

/**
 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
 * @phba: lpfc_hba pointer.
 * @opcode: register access opcode (LPFC_FW_DUMP, LPFC_FW_RESET or
 *	    LPFC_DV_RESET).
 *
 * Description:
 * Request SLI4 interface type-2 device to perform a physical register set
 * access.
1512 * 1513 * Returns: 1514 * zero for success 1515 **/ 1516 static ssize_t 1517 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) 1518 { 1519 struct completion online_compl; 1520 struct pci_dev *pdev = phba->pcidev; 1521 uint32_t before_fc_flag; 1522 uint32_t sriov_nr_virtfn; 1523 uint32_t reg_val; 1524 int status = 0, rc = 0; 1525 int job_posted = 1, sriov_err; 1526 1527 if (!phba->cfg_enable_hba_reset) 1528 return -EACCES; 1529 1530 if ((phba->sli_rev < LPFC_SLI_REV4) || 1531 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 1532 LPFC_SLI_INTF_IF_TYPE_2)) 1533 return -EPERM; 1534 1535 /* Keep state if we need to restore back */ 1536 before_fc_flag = phba->pport->fc_flag; 1537 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; 1538 1539 /* Disable SR-IOV virtual functions if enabled */ 1540 if (phba->cfg_sriov_nr_virtfn) { 1541 pci_disable_sriov(pdev); 1542 phba->cfg_sriov_nr_virtfn = 0; 1543 } 1544 1545 if (opcode == LPFC_FW_DUMP) 1546 phba->hba_flag |= HBA_FW_DUMP_OP; 1547 1548 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1549 1550 if (status != 0) { 1551 phba->hba_flag &= ~HBA_FW_DUMP_OP; 1552 return status; 1553 } 1554 1555 /* wait for the device to be quiesced before firmware reset */ 1556 msleep(100); 1557 1558 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + 1559 LPFC_CTL_PDEV_CTL_OFFSET); 1560 1561 if (opcode == LPFC_FW_DUMP) 1562 reg_val |= LPFC_FW_DUMP_REQUEST; 1563 else if (opcode == LPFC_FW_RESET) 1564 reg_val |= LPFC_CTL_PDEV_CTL_FRST; 1565 else if (opcode == LPFC_DV_RESET) 1566 reg_val |= LPFC_CTL_PDEV_CTL_DRST; 1567 1568 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p + 1569 LPFC_CTL_PDEV_CTL_OFFSET); 1570 /* flush */ 1571 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 1572 1573 /* delay driver action following IF_TYPE_2 reset */ 1574 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1575 1576 if (rc == -EPERM) { 1577 /* no privilege for reset */ 1578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1579 "3150 No privilege to perform the requested " 1580 "access: x%x\n", reg_val); 1581 } else if (rc == -EIO) { 1582 /* reset failed, there is nothing more we can do */ 1583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1584 "3153 Fail to perform the requested " 1585 "access: x%x\n", reg_val); 1586 return rc; 1587 } 1588 1589 /* keep the original port state */ 1590 if (before_fc_flag & FC_OFFLINE_MODE) 1591 goto out; 1592 1593 init_completion(&online_compl); 1594 job_posted = lpfc_workq_post_event(phba, &status, &online_compl, 1595 LPFC_EVT_ONLINE); 1596 if (!job_posted) 1597 goto out; 1598 1599 wait_for_completion(&online_compl); 1600 1601 out: 1602 /* in any case, restore the virtual functions enabled as before */ 1603 if (sriov_nr_virtfn) { 1604 sriov_err = 1605 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); 1606 if (!sriov_err) 1607 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn; 1608 } 1609 1610 /* return proper error code */ 1611 if (!rc) { 1612 if (!job_posted) 1613 rc = -ENOMEM; 1614 else if (status) 1615 rc = -EIO; 1616 } 1617 return rc; 1618 } 1619 1620 /** 1621 * lpfc_nport_evt_cnt_show - Return the number of nport events 1622 * @dev: class device that is converted into a Scsi_host. 1623 * @attr: device attribute, not used. 1624 * @buf: on return contains the ascii number of nport events. 1625 * 1626 * Returns: size of formatted string. 
1627 **/ 1628 static ssize_t 1629 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, 1630 char *buf) 1631 { 1632 struct Scsi_Host *shost = class_to_shost(dev); 1633 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1634 struct lpfc_hba *phba = vport->phba; 1635 1636 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 1637 } 1638 1639 static int 1640 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) 1641 { 1642 LPFC_MBOXQ_t *mbox = NULL; 1643 unsigned long val = 0; 1644 char *pval = NULL; 1645 int rc = 0; 1646 1647 if (!strncmp("enable", buff_out, 1648 strlen("enable"))) { 1649 pval = buff_out + strlen("enable") + 1; 1650 rc = kstrtoul(pval, 0, &val); 1651 if (rc) 1652 return rc; /* Invalid number */ 1653 } else if (!strncmp("disable", buff_out, 1654 strlen("disable"))) { 1655 val = 0; 1656 } else { 1657 return -EINVAL; /* Invalid command */ 1658 } 1659 1660 switch (val) { 1661 case 0: 1662 val = 0x0; /* Disable */ 1663 break; 1664 case 2: 1665 val = 0x1; /* Enable two port trunk */ 1666 break; 1667 case 4: 1668 val = 0x2; /* Enable four port trunk */ 1669 break; 1670 default: 1671 return -EINVAL; 1672 } 1673 1674 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1675 "0070 Set trunk mode with val %ld ", val); 1676 1677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1678 if (!mbox) 1679 return -ENOMEM; 1680 1681 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 1682 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, 1683 12, LPFC_SLI4_MBX_EMBED); 1684 1685 bf_set(lpfc_mbx_set_trunk_mode, 1686 &mbox->u.mqe.un.set_trunk_mode, 1687 val); 1688 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 1689 if (rc) 1690 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1691 "0071 Set trunk mode failed with status: %d", 1692 rc); 1693 if (rc != MBX_TIMEOUT) 1694 mempool_free(mbox, phba->mbox_mem_pool); 1695 1696 return 0; 1697 } 1698 1699 /** 1700 * lpfc_board_mode_show - Return the state of the board 1701 * @dev: class device that is converted into a Scsi_host. 1702 * @attr: device attribute, not used. 1703 * @buf: on return contains the state of the adapter. 1704 * 1705 * Returns: size of formatted string. 1706 **/ 1707 static ssize_t 1708 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, 1709 char *buf) 1710 { 1711 struct Scsi_Host *shost = class_to_shost(dev); 1712 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1713 struct lpfc_hba *phba = vport->phba; 1714 char * state; 1715 1716 if (phba->link_state == LPFC_HBA_ERROR) 1717 state = "error"; 1718 else if (phba->link_state == LPFC_WARM_START) 1719 state = "warm start"; 1720 else if (phba->link_state == LPFC_INIT_START) 1721 state = "offline"; 1722 else 1723 state = "online"; 1724 1725 return scnprintf(buf, PAGE_SIZE, "%s\n", state); 1726 } 1727 1728 /** 1729 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state 1730 * @dev: class device that is converted into a Scsi_host. 1731 * @attr: device attribute, not used. 1732 * @buf: containing one of the strings "online", "offline", "warm" or "error". 1733 * @count: unused variable. 
1734 * 1735 * Returns: 1736 * -EACCES if enable hba reset not enabled 1737 * -EINVAL if the buffer does not contain a valid string (see above) 1738 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails 1739 * buf length greater than zero indicates success 1740 **/ 1741 static ssize_t 1742 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, 1743 const char *buf, size_t count) 1744 { 1745 struct Scsi_Host *shost = class_to_shost(dev); 1746 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1747 struct lpfc_hba *phba = vport->phba; 1748 struct completion online_compl; 1749 char *board_mode_str = NULL; 1750 int status = 0; 1751 int rc; 1752 1753 if (!phba->cfg_enable_hba_reset) { 1754 status = -EACCES; 1755 goto board_mode_out; 1756 } 1757 1758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1759 "3050 lpfc_board_mode set to %s\n", buf); 1760 1761 init_completion(&online_compl); 1762 1763 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 1764 rc = lpfc_workq_post_event(phba, &status, &online_compl, 1765 LPFC_EVT_ONLINE); 1766 if (rc == 0) { 1767 status = -ENOMEM; 1768 goto board_mode_out; 1769 } 1770 wait_for_completion(&online_compl); 1771 if (status) 1772 status = -EIO; 1773 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 1774 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 1775 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) 1776 if (phba->sli_rev == LPFC_SLI_REV4) 1777 status = -EINVAL; 1778 else 1779 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); 1780 else if (strncmp(buf, "error", sizeof("error") - 1) == 0) 1781 if (phba->sli_rev == LPFC_SLI_REV4) 1782 status = -EINVAL; 1783 else 1784 status = lpfc_do_offline(phba, LPFC_EVT_KILL); 1785 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0) 1786 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP); 1787 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0) 1788 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); 1789 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) 1790 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); 1791 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1) 1792 == 0) 1793 status = lpfc_reset_pci_bus(phba); 1794 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0) 1795 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk")); 1796 else 1797 status = -EINVAL; 1798 1799 board_mode_out: 1800 if (!status) 1801 return strlen(buf); 1802 else { 1803 board_mode_str = strchr(buf, '\n'); 1804 if (board_mode_str) 1805 *board_mode_str = '\0'; 1806 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1807 "3097 Failed \"%s\", status(%d), " 1808 "fc_flag(x%x)\n", 1809 buf, status, phba->pport->fc_flag); 1810 return status; 1811 } 1812 } 1813 1814 /** 1815 * lpfc_get_hba_info - Return various bits of informaton about the adapter 1816 * @phba: pointer to the adapter structure. 1817 * @mxri: max xri count. 1818 * @axri: available xri count. 1819 * @mrpi: max rpi count. 1820 * @arpi: available rpi count. 1821 * @mvpi: max vpi count. 1822 * @avpi: available vpi count. 1823 * 1824 * Description: 1825 * If an integer pointer for an count is not null then the value for the 1826 * count is returned. 
1827 * 1828 * Returns: 1829 * zero on error 1830 * one for success 1831 **/ 1832 static int 1833 lpfc_get_hba_info(struct lpfc_hba *phba, 1834 uint32_t *mxri, uint32_t *axri, 1835 uint32_t *mrpi, uint32_t *arpi, 1836 uint32_t *mvpi, uint32_t *avpi) 1837 { 1838 struct lpfc_mbx_read_config *rd_config; 1839 LPFC_MBOXQ_t *pmboxq; 1840 MAILBOX_t *pmb; 1841 int rc = 0; 1842 uint32_t max_vpi; 1843 1844 /* 1845 * prevent udev from issuing mailbox commands until the port is 1846 * configured. 1847 */ 1848 if (phba->link_state < LPFC_LINK_DOWN || 1849 !phba->mbox_mem_pool || 1850 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 1851 return 0; 1852 1853 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 1854 return 0; 1855 1856 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1857 if (!pmboxq) 1858 return 0; 1859 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 1860 1861 pmb = &pmboxq->u.mb; 1862 pmb->mbxCommand = MBX_READ_CONFIG; 1863 pmb->mbxOwner = OWN_HOST; 1864 pmboxq->ctx_buf = NULL; 1865 1866 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1867 rc = MBX_NOT_FINISHED; 1868 else 1869 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 1870 1871 if (rc != MBX_SUCCESS) { 1872 if (rc != MBX_TIMEOUT) 1873 mempool_free(pmboxq, phba->mbox_mem_pool); 1874 return 0; 1875 } 1876 1877 if (phba->sli_rev == LPFC_SLI_REV4) { 1878 rd_config = &pmboxq->u.mqe.un.rd_config; 1879 if (mrpi) 1880 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 1881 if (arpi) 1882 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - 1883 phba->sli4_hba.max_cfg_param.rpi_used; 1884 if (mxri) 1885 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 1886 if (axri) 1887 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - 1888 phba->sli4_hba.max_cfg_param.xri_used; 1889 1890 /* Account for differences with SLI-3. Get vpi count from 1891 * mailbox data and subtract one for max vpi value. 1892 */ 1893 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 1894 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; 1895 1896 /* Limit the max we support */ 1897 if (max_vpi > LPFC_MAX_VPI) 1898 max_vpi = LPFC_MAX_VPI; 1899 if (mvpi) 1900 *mvpi = max_vpi; 1901 if (avpi) 1902 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used; 1903 } else { 1904 if (mrpi) 1905 *mrpi = pmb->un.varRdConfig.max_rpi; 1906 if (arpi) 1907 *arpi = pmb->un.varRdConfig.avail_rpi; 1908 if (mxri) 1909 *mxri = pmb->un.varRdConfig.max_xri; 1910 if (axri) 1911 *axri = pmb->un.varRdConfig.avail_xri; 1912 if (mvpi) 1913 *mvpi = pmb->un.varRdConfig.max_vpi; 1914 if (avpi) { 1915 /* avail_vpi is only valid if link is up and ready */ 1916 if (phba->link_state == LPFC_HBA_READY) 1917 *avpi = pmb->un.varRdConfig.avail_vpi; 1918 else 1919 *avpi = pmb->un.varRdConfig.max_vpi; 1920 } 1921 } 1922 1923 mempool_free(pmboxq, phba->mbox_mem_pool); 1924 return 1; 1925 } 1926 1927 /** 1928 * lpfc_max_rpi_show - Return maximum rpi 1929 * @dev: class device that is converted into a Scsi_host. 1930 * @attr: device attribute, not used. 1931 * @buf: on return contains the maximum rpi count in decimal or "Unknown". 1932 * 1933 * Description: 1934 * Calls lpfc_get_hba_info() asking for just the mrpi count. 1935 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 1936 * to "Unknown" and the buffer length is returned, therefore the caller 1937 * must check for "Unknown" in the buffer to detect a failure. 1938 * 1939 * Returns: size of formatted string. 
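 *
 * Example read (illustrative host instance):
 *     cat /sys/class/scsi_host/host0/max_rpi
 * which prints either the decimal count or "Unknown".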
 **/
static ssize_t
lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt;

	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the used rpi count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt, acnt;

	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_max_xri_show - Return maximum xri
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the maximum xri count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mxri count.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t cnt;

	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

/**
 * lpfc_used_xri_show - Return maximum xri minus the available xri
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the used xri count in decimal or "Unknown".
 *
 * Description:
 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
 * to "Unknown" and the buffer length is returned, therefore the caller
 * must check for "Unknown" in the buffer to detect a failure.
 *
 * Returns: size of formatted string.
2024 **/ 2025 static ssize_t 2026 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, 2027 char *buf) 2028 { 2029 struct Scsi_Host *shost = class_to_shost(dev); 2030 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2031 struct lpfc_hba *phba = vport->phba; 2032 uint32_t cnt, acnt; 2033 2034 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) 2035 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2036 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2037 } 2038 2039 /** 2040 * lpfc_max_vpi_show - Return maximum vpi 2041 * @dev: class device that is converted into a Scsi_host. 2042 * @attr: device attribute, not used. 2043 * @buf: on return contains the maximum vpi count in decimal or "Unknown". 2044 * 2045 * Description: 2046 * Calls lpfc_get_hba_info() asking for just the mvpi count. 2047 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2048 * to "Unknown" and the buffer length is returned, therefore the caller 2049 * must check for "Unknown" in the buffer to detect a failure. 2050 * 2051 * Returns: size of formatted string. 2052 **/ 2053 static ssize_t 2054 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, 2055 char *buf) 2056 { 2057 struct Scsi_Host *shost = class_to_shost(dev); 2058 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2059 struct lpfc_hba *phba = vport->phba; 2060 uint32_t cnt; 2061 2062 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) 2063 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); 2064 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2065 } 2066 2067 /** 2068 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi 2069 * @dev: class device that is converted into a Scsi_host. 2070 * @attr: device attribute, not used. 2071 * @buf: on return contains the used vpi count in decimal or "Unknown". 2072 * 2073 * Description: 2074 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. 2075 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set 2076 * to "Unknown" and the buffer length is returned, therefore the caller 2077 * must check for "Unknown" in the buffer to detect a failure. 2078 * 2079 * Returns: size of formatted string. 2080 **/ 2081 static ssize_t 2082 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, 2083 char *buf) 2084 { 2085 struct Scsi_Host *shost = class_to_shost(dev); 2086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2087 struct lpfc_hba *phba = vport->phba; 2088 uint32_t cnt, acnt; 2089 2090 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) 2091 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); 2092 return scnprintf(buf, PAGE_SIZE, "Unknown\n"); 2093 } 2094 2095 /** 2096 * lpfc_npiv_info_show - Return text about NPIV support for the adapter 2097 * @dev: class device that is converted into a Scsi_host. 2098 * @attr: device attribute, not used. 2099 * @buf: text that must be interpreted to determine if npiv is supported. 2100 * 2101 * Description: 2102 * Buffer will contain text indicating npiv is not suppoerted on the port, 2103 * the port is an NPIV physical port, or it is an npiv virtual port with 2104 * the id of the vport. 2105 * 2106 * Returns: size of formatted string. 
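 *
 * Example (illustrative host instance):
 *     cat /sys/class/scsi_host/host0/npiv_info
 * prints one of "NPIV Not Supported", "NPIV Physical" or
 * "NPIV Virtual (VPI <n>)".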
2107 **/ 2108 static ssize_t 2109 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, 2110 char *buf) 2111 { 2112 struct Scsi_Host *shost = class_to_shost(dev); 2113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2114 struct lpfc_hba *phba = vport->phba; 2115 2116 if (!(phba->max_vpi)) 2117 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); 2118 if (vport->port_type == LPFC_PHYSICAL_PORT) 2119 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); 2120 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); 2121 } 2122 2123 /** 2124 * lpfc_poll_show - Return text about poll support for the adapter 2125 * @dev: class device that is converted into a Scsi_host. 2126 * @attr: device attribute, not used. 2127 * @buf: on return contains the cfg_poll in hex. 2128 * 2129 * Notes: 2130 * cfg_poll should be a lpfc_polling_flags type. 2131 * 2132 * Returns: size of formatted string. 2133 **/ 2134 static ssize_t 2135 lpfc_poll_show(struct device *dev, struct device_attribute *attr, 2136 char *buf) 2137 { 2138 struct Scsi_Host *shost = class_to_shost(dev); 2139 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2140 struct lpfc_hba *phba = vport->phba; 2141 2142 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 2143 } 2144 2145 /** 2146 * lpfc_poll_store - Set the value of cfg_poll for the adapter 2147 * @dev: class device that is converted into a Scsi_host. 2148 * @attr: device attribute, not used. 2149 * @buf: one or more lpfc_polling_flags values. 2150 * @count: not used. 2151 * 2152 * Notes: 2153 * buf contents converted to integer and checked for a valid value. 2154 * 2155 * Returns: 2156 * -EINVAL if the buffer connot be converted or is out of range 2157 * length of the buf on success 2158 **/ 2159 static ssize_t 2160 lpfc_poll_store(struct device *dev, struct device_attribute *attr, 2161 const char *buf, size_t count) 2162 { 2163 struct Scsi_Host *shost = class_to_shost(dev); 2164 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2165 struct lpfc_hba *phba = vport->phba; 2166 uint32_t creg_val; 2167 uint32_t old_val; 2168 int val=0; 2169 2170 if (!isdigit(buf[0])) 2171 return -EINVAL; 2172 2173 if (sscanf(buf, "%i", &val) != 1) 2174 return -EINVAL; 2175 2176 if ((val & 0x3) != val) 2177 return -EINVAL; 2178 2179 if (phba->sli_rev == LPFC_SLI_REV4) 2180 val = 0; 2181 2182 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2183 "3051 lpfc_poll changed from %d to %d\n", 2184 phba->cfg_poll, val); 2185 2186 spin_lock_irq(&phba->hbalock); 2187 2188 old_val = phba->cfg_poll; 2189 2190 if (val & ENABLE_FCP_RING_POLLING) { 2191 if ((val & DISABLE_FCP_RING_INT) && 2192 !(old_val & DISABLE_FCP_RING_INT)) { 2193 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2194 spin_unlock_irq(&phba->hbalock); 2195 return -EINVAL; 2196 } 2197 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 2198 writel(creg_val, phba->HCregaddr); 2199 readl(phba->HCregaddr); /* flush */ 2200 2201 lpfc_poll_start_timer(phba); 2202 } 2203 } else if (val != 0x0) { 2204 spin_unlock_irq(&phba->hbalock); 2205 return -EINVAL; 2206 } 2207 2208 if (!(val & DISABLE_FCP_RING_INT) && 2209 (old_val & DISABLE_FCP_RING_INT)) 2210 { 2211 spin_unlock_irq(&phba->hbalock); 2212 del_timer(&phba->fcp_poll_timer); 2213 spin_lock_irq(&phba->hbalock); 2214 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2215 spin_unlock_irq(&phba->hbalock); 2216 return -EINVAL; 2217 } 2218 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 2219 writel(creg_val, phba->HCregaddr); 2220 
readl(phba->HCregaddr); /* flush */ 2221 } 2222 2223 phba->cfg_poll = val; 2224 2225 spin_unlock_irq(&phba->hbalock); 2226 2227 return strlen(buf); 2228 } 2229 2230 /** 2231 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions 2232 * @dev: class converted to a Scsi_host structure. 2233 * @attr: device attribute, not used. 2234 * @buf: on return contains the formatted support level. 2235 * 2236 * Description: 2237 * Returns the maximum number of virtual functions a physical function can 2238 * support, 0 will be returned if called on virtual function. 2239 * 2240 * Returns: size of formatted string. 2241 **/ 2242 static ssize_t 2243 lpfc_sriov_hw_max_virtfn_show(struct device *dev, 2244 struct device_attribute *attr, 2245 char *buf) 2246 { 2247 struct Scsi_Host *shost = class_to_shost(dev); 2248 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2249 struct lpfc_hba *phba = vport->phba; 2250 uint16_t max_nr_virtfn; 2251 2252 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); 2253 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); 2254 } 2255 2256 static inline bool lpfc_rangecheck(uint val, uint min, uint max) 2257 { 2258 return val >= min && val <= max; 2259 } 2260 2261 /** 2262 * lpfc_enable_bbcr_set: Sets an attribute value. 2263 * @phba: pointer the the adapter structure. 2264 * @val: integer attribute value. 2265 * 2266 * Description: 2267 * Validates the min and max values then sets the 2268 * adapter config field if in the valid range. prints error message 2269 * and does not set the parameter if invalid. 2270 * 2271 * Returns: 2272 * zero on success 2273 * -EINVAL if val is invalid 2274 */ 2275 static ssize_t 2276 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) 2277 { 2278 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { 2279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2280 "3068 %s_enable_bbcr changed from %d to %d\n", 2281 LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val); 2282 phba->cfg_enable_bbcr = val; 2283 return 0; 2284 } 2285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2286 "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n", 2287 LPFC_DRIVER_NAME, val); 2288 return -EINVAL; 2289 } 2290 2291 /** 2292 * lpfc_param_show - Return a cfg attribute value in decimal 2293 * 2294 * Description: 2295 * Macro that given an attr e.g. hba_queue_depth expands 2296 * into a function with the name lpfc_hba_queue_depth_show. 2297 * 2298 * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. 2299 * @dev: class device that is converted into a Scsi_host. 2300 * @attr: device attribute, not used. 2301 * @buf: on return contains the attribute value in decimal. 2302 * 2303 * Returns: size of formatted string. 2304 **/ 2305 #define lpfc_param_show(attr) \ 2306 static ssize_t \ 2307 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2308 char *buf) \ 2309 { \ 2310 struct Scsi_Host *shost = class_to_shost(dev);\ 2311 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2312 struct lpfc_hba *phba = vport->phba;\ 2313 return scnprintf(buf, PAGE_SIZE, "%d\n",\ 2314 phba->cfg_##attr);\ 2315 } 2316 2317 /** 2318 * lpfc_param_hex_show - Return a cfg attribute value in hex 2319 * 2320 * Description: 2321 * Macro that given an attr e.g. hba_queue_depth expands 2322 * into a function with the name lpfc_hba_queue_depth_show 2323 * 2324 * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. 2325 * @dev: class device that is converted into a Scsi_host. 
2326 * @attr: device attribute, not used. 2327 * @buf: on return contains the attribute value in hexadecimal. 2328 * 2329 * Returns: size of formatted string. 2330 **/ 2331 #define lpfc_param_hex_show(attr) \ 2332 static ssize_t \ 2333 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2334 char *buf) \ 2335 { \ 2336 struct Scsi_Host *shost = class_to_shost(dev);\ 2337 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2338 struct lpfc_hba *phba = vport->phba;\ 2339 uint val = 0;\ 2340 val = phba->cfg_##attr;\ 2341 return scnprintf(buf, PAGE_SIZE, "%#x\n",\ 2342 phba->cfg_##attr);\ 2343 } 2344 2345 /** 2346 * lpfc_param_init - Initializes a cfg attribute 2347 * 2348 * Description: 2349 * Macro that given an attr e.g. hba_queue_depth expands 2350 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2351 * takes a default argument, a minimum and maximum argument. 2352 * 2353 * lpfc_##attr##_init: Initializes an attribute. 2354 * @phba: pointer the the adapter structure. 2355 * @val: integer attribute value. 2356 * 2357 * Validates the min and max values then sets the adapter config field 2358 * accordingly, or uses the default if out of range and prints an error message. 2359 * 2360 * Returns: 2361 * zero on success 2362 * -EINVAL if default used 2363 **/ 2364 #define lpfc_param_init(attr, default, minval, maxval) \ 2365 static int \ 2366 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ 2367 { \ 2368 if (lpfc_rangecheck(val, minval, maxval)) {\ 2369 phba->cfg_##attr = val;\ 2370 return 0;\ 2371 }\ 2372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2373 "0449 lpfc_"#attr" attribute cannot be set to %d, "\ 2374 "allowed range is ["#minval", "#maxval"]\n", val); \ 2375 phba->cfg_##attr = default;\ 2376 return -EINVAL;\ 2377 } 2378 2379 /** 2380 * lpfc_param_set - Set a cfg attribute value 2381 * 2382 * Description: 2383 * Macro that given an attr e.g. hba_queue_depth expands 2384 * into a function with the name lpfc_hba_queue_depth_set 2385 * 2386 * lpfc_##attr##_set: Sets an attribute value. 2387 * @phba: pointer the the adapter structure. 2388 * @val: integer attribute value. 2389 * 2390 * Description: 2391 * Validates the min and max values then sets the 2392 * adapter config field if in the valid range. prints error message 2393 * and does not set the parameter if invalid. 2394 * 2395 * Returns: 2396 * zero on success 2397 * -EINVAL if val is invalid 2398 **/ 2399 #define lpfc_param_set(attr, default, minval, maxval) \ 2400 static int \ 2401 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 2402 { \ 2403 if (lpfc_rangecheck(val, minval, maxval)) {\ 2404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2405 "3052 lpfc_" #attr " changed from %d to %d\n", \ 2406 phba->cfg_##attr, val); \ 2407 phba->cfg_##attr = val;\ 2408 return 0;\ 2409 }\ 2410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ 2411 "0450 lpfc_"#attr" attribute cannot be set to %d, "\ 2412 "allowed range is ["#minval", "#maxval"]\n", val); \ 2413 return -EINVAL;\ 2414 } 2415 2416 /** 2417 * lpfc_param_store - Set a vport attribute value 2418 * 2419 * Description: 2420 * Macro that given an attr e.g. hba_queue_depth expands 2421 * into a function with the name lpfc_hba_queue_depth_store. 2422 * 2423 * lpfc_##attr##_store: Set an sttribute value. 2424 * @dev: class device that is converted into a Scsi_host. 2425 * @attr: device attribute, not used. 2426 * @buf: contains the attribute value in ascii. 2427 * @count: not used. 
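 *
 * For example, writes of "32" and "0x20" to the generated attribute are
 * equivalent, since the text is parsed with the "%i" conversion.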
2428 * 2429 * Description: 2430 * Convert the ascii text number to an integer, then 2431 * use the lpfc_##attr##_set function to set the value. 2432 * 2433 * Returns: 2434 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2435 * length of buffer upon success. 2436 **/ 2437 #define lpfc_param_store(attr) \ 2438 static ssize_t \ 2439 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2440 const char *buf, size_t count) \ 2441 { \ 2442 struct Scsi_Host *shost = class_to_shost(dev);\ 2443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2444 struct lpfc_hba *phba = vport->phba;\ 2445 uint val = 0;\ 2446 if (!isdigit(buf[0]))\ 2447 return -EINVAL;\ 2448 if (sscanf(buf, "%i", &val) != 1)\ 2449 return -EINVAL;\ 2450 if (lpfc_##attr##_set(phba, val) == 0) \ 2451 return strlen(buf);\ 2452 else \ 2453 return -EINVAL;\ 2454 } 2455 2456 /** 2457 * lpfc_vport_param_show - Return decimal formatted cfg attribute value 2458 * 2459 * Description: 2460 * Macro that given an attr e.g. hba_queue_depth expands 2461 * into a function with the name lpfc_hba_queue_depth_show 2462 * 2463 * lpfc_##attr##_show: prints the attribute value in decimal. 2464 * @dev: class device that is converted into a Scsi_host. 2465 * @attr: device attribute, not used. 2466 * @buf: on return contains the attribute value in decimal. 2467 * 2468 * Returns: length of formatted string. 2469 **/ 2470 #define lpfc_vport_param_show(attr) \ 2471 static ssize_t \ 2472 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2473 char *buf) \ 2474 { \ 2475 struct Scsi_Host *shost = class_to_shost(dev);\ 2476 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2477 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ 2478 } 2479 2480 /** 2481 * lpfc_vport_param_hex_show - Return hex formatted attribute value 2482 * 2483 * Description: 2484 * Macro that given an attr e.g. 2485 * hba_queue_depth expands into a function with the name 2486 * lpfc_hba_queue_depth_show 2487 * 2488 * lpfc_##attr##_show: prints the attribute value in hexadecimal. 2489 * @dev: class device that is converted into a Scsi_host. 2490 * @attr: device attribute, not used. 2491 * @buf: on return contains the attribute value in hexadecimal. 2492 * 2493 * Returns: length of formatted string. 2494 **/ 2495 #define lpfc_vport_param_hex_show(attr) \ 2496 static ssize_t \ 2497 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ 2498 char *buf) \ 2499 { \ 2500 struct Scsi_Host *shost = class_to_shost(dev);\ 2501 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2502 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ 2503 } 2504 2505 /** 2506 * lpfc_vport_param_init - Initialize a vport cfg attribute 2507 * 2508 * Description: 2509 * Macro that given an attr e.g. hba_queue_depth expands 2510 * into a function with the name lpfc_hba_queue_depth_init. The macro also 2511 * takes a default argument, a minimum and maximum argument. 2512 * 2513 * lpfc_##attr##_init: validates the min and max values then sets the 2514 * adapter config field accordingly, or uses the default if out of range 2515 * and prints an error message. 2516 * @phba: pointer the the adapter structure. 2517 * @val: integer attribute value. 
2518 * 2519 * Returns: 2520 * zero on success 2521 * -EINVAL if default used 2522 **/ 2523 #define lpfc_vport_param_init(attr, default, minval, maxval) \ 2524 static int \ 2525 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ 2526 { \ 2527 if (lpfc_rangecheck(val, minval, maxval)) {\ 2528 vport->cfg_##attr = val;\ 2529 return 0;\ 2530 }\ 2531 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2532 "0423 lpfc_"#attr" attribute cannot be set to %d, "\ 2533 "allowed range is ["#minval", "#maxval"]\n", val); \ 2534 vport->cfg_##attr = default;\ 2535 return -EINVAL;\ 2536 } 2537 2538 /** 2539 * lpfc_vport_param_set - Set a vport cfg attribute 2540 * 2541 * Description: 2542 * Macro that given an attr e.g. hba_queue_depth expands 2543 * into a function with the name lpfc_hba_queue_depth_set 2544 * 2545 * lpfc_##attr##_set: validates the min and max values then sets the 2546 * adapter config field if in the valid range. prints error message 2547 * and does not set the parameter if invalid. 2548 * @phba: pointer the the adapter structure. 2549 * @val: integer attribute value. 2550 * 2551 * Returns: 2552 * zero on success 2553 * -EINVAL if val is invalid 2554 **/ 2555 #define lpfc_vport_param_set(attr, default, minval, maxval) \ 2556 static int \ 2557 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 2558 { \ 2559 if (lpfc_rangecheck(val, minval, maxval)) {\ 2560 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2561 "3053 lpfc_" #attr \ 2562 " changed from %d (x%x) to %d (x%x)\n", \ 2563 vport->cfg_##attr, vport->cfg_##attr, \ 2564 val, val); \ 2565 vport->cfg_##attr = val;\ 2566 return 0;\ 2567 }\ 2568 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 2569 "0424 lpfc_"#attr" attribute cannot be set to %d, "\ 2570 "allowed range is ["#minval", "#maxval"]\n", val); \ 2571 return -EINVAL;\ 2572 } 2573 2574 /** 2575 * lpfc_vport_param_store - Set a vport attribute 2576 * 2577 * Description: 2578 * Macro that given an attr e.g. hba_queue_depth 2579 * expands into a function with the name lpfc_hba_queue_depth_store 2580 * 2581 * lpfc_##attr##_store: convert the ascii text number to an integer, then 2582 * use the lpfc_##attr##_set function to set the value. 2583 * @cdev: class device that is converted into a Scsi_host. 2584 * @buf: contains the attribute value in decimal. 2585 * @count: not used. 2586 * 2587 * Returns: 2588 * -EINVAL if val is invalid or lpfc_##attr##_set() fails 2589 * length of buffer upon success. 
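 *
 * For example, the lpfc_vport_param_store(nodev_tmo) use later in this
 * file emits lpfc_nodev_tmo_store(), which parses the sysfs write and
 * hands the value to lpfc_nodev_tmo_set().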
2590 **/ 2591 #define lpfc_vport_param_store(attr) \ 2592 static ssize_t \ 2593 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ 2594 const char *buf, size_t count) \ 2595 { \ 2596 struct Scsi_Host *shost = class_to_shost(dev);\ 2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ 2598 uint val = 0;\ 2599 if (!isdigit(buf[0]))\ 2600 return -EINVAL;\ 2601 if (sscanf(buf, "%i", &val) != 1)\ 2602 return -EINVAL;\ 2603 if (lpfc_##attr##_set(vport, val) == 0) \ 2604 return strlen(buf);\ 2605 else \ 2606 return -EINVAL;\ 2607 } 2608 2609 2610 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); 2611 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); 2612 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); 2613 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); 2614 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); 2615 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); 2616 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); 2617 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); 2618 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 2619 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); 2620 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); 2621 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); 2622 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 2623 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 2624 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, 2625 lpfc_link_state_store); 2626 static DEVICE_ATTR(option_rom_version, S_IRUGO, 2627 lpfc_option_rom_version_show, NULL); 2628 static DEVICE_ATTR(num_discovered_ports, S_IRUGO, 2629 lpfc_num_discovered_ports_show, NULL); 2630 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); 2631 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 2632 static DEVICE_ATTR_RO(lpfc_drvr_version); 2633 static DEVICE_ATTR_RO(lpfc_enable_fip); 2634 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 2635 lpfc_board_mode_show, lpfc_board_mode_store); 2636 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 2637 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); 2638 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); 2639 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); 2640 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); 2641 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); 2642 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); 2643 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); 2644 static DEVICE_ATTR_RO(lpfc_temp_sensor); 2645 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); 2646 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2647 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, 2648 NULL); 2649 2650 static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 2651 #define WWN_SZ 8 2652 /** 2653 * lpfc_wwn_set - Convert string to the 8 byte WWN value. 2654 * @buf: WWN string. 2655 * @cnt: Length of string. 2656 * @wwn: Array to receive converted wwn value. 
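 *
 * Accepted input is 16 hex digits, optionally prefixed with "x" or "0x",
 * e.g. "10000090fa8d1234" or "0x10000090fa8d1234" (example value only);
 * a trailing newline is ignored.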
2657 * 2658 * Returns: 2659 * -EINVAL if the buffer does not contain a valid wwn 2660 * 0 success 2661 **/ 2662 static size_t 2663 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) 2664 { 2665 unsigned int i, j; 2666 2667 /* Count may include a LF at end of string */ 2668 if (buf[cnt-1] == '\n') 2669 cnt--; 2670 2671 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || 2672 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) 2673 return -EINVAL; 2674 2675 memset(wwn, 0, WWN_SZ); 2676 2677 /* Validate and store the new name */ 2678 for (i = 0, j = 0; i < 16; i++) { 2679 if ((*buf >= 'a') && (*buf <= 'f')) 2680 j = ((j << 4) | ((*buf++ - 'a') + 10)); 2681 else if ((*buf >= 'A') && (*buf <= 'F')) 2682 j = ((j << 4) | ((*buf++ - 'A') + 10)); 2683 else if ((*buf >= '0') && (*buf <= '9')) 2684 j = ((j << 4) | (*buf++ - '0')); 2685 else 2686 return -EINVAL; 2687 if (i % 2) { 2688 wwn[i/2] = j & 0xff; 2689 j = 0; 2690 } 2691 } 2692 return 0; 2693 } 2694 /** 2695 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid 2696 * @dev: class device that is converted into a Scsi_host. 2697 * @attr: device attribute, not used. 2698 * @buf: containing the string lpfc_soft_wwn_key. 2699 * @count: must be size of lpfc_soft_wwn_key. 2700 * 2701 * Returns: 2702 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key 2703 * length of buf indicates success 2704 **/ 2705 static ssize_t 2706 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, 2707 const char *buf, size_t count) 2708 { 2709 struct Scsi_Host *shost = class_to_shost(dev); 2710 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2711 struct lpfc_hba *phba = vport->phba; 2712 unsigned int cnt = count; 2713 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 2714 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0]; 2715 2716 /* 2717 * We're doing a simple sanity check for soft_wwpn setting. 2718 * We require that the user write a specific key to enable 2719 * the soft_wwpn attribute to be settable. Once the attribute 2720 * is written, the enable key resets. If further updates are 2721 * desired, the key must be written again to re-enable the 2722 * attribute. 2723 * 2724 * The "key" is not secret - it is a hardcoded string shown 2725 * here. The intent is to protect against the random user or 2726 * application that is just writing attributes. 2727 */ 2728 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) { 2729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2730 "0051 "LPFC_DRIVER_NAME" soft wwpn can not" 2731 " be enabled: fawwpn is enabled\n"); 2732 return -EINVAL; 2733 } 2734 2735 /* count may include a LF at end of string */ 2736 if (buf[cnt-1] == '\n') 2737 cnt--; 2738 2739 if ((cnt != strlen(lpfc_soft_wwn_key)) || 2740 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) 2741 return -EINVAL; 2742 2743 phba->soft_wwn_enable = 1; 2744 2745 dev_printk(KERN_WARNING, &phba->pcidev->dev, 2746 "lpfc%d: soft_wwpn assignment has been enabled.\n", 2747 phba->brd_no); 2748 dev_printk(KERN_WARNING, &phba->pcidev->dev, 2749 " The soft_wwpn feature is not supported by Broadcom."); 2750 2751 return count; 2752 } 2753 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable); 2754 2755 /** 2756 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter 2757 * @dev: class device that is converted into a Scsi_host. 2758 * @attr: device attribute, not used. 2759 * @buf: on return contains the wwpn in hexadecimal. 
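 *
 * Note: changing the value requires writing the enable key first, e.g.
 * (illustrative host instance):
 *     echo "C99G71SL8032A" > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *     echo "0x10000090fa8d1234" > /sys/class/scsi_host/host0/lpfc_soft_wwpn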
2760 * 2761 * Returns: size of formatted string. 2762 **/ 2763 static ssize_t 2764 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, 2765 char *buf) 2766 { 2767 struct Scsi_Host *shost = class_to_shost(dev); 2768 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2769 struct lpfc_hba *phba = vport->phba; 2770 2771 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 2772 (unsigned long long)phba->cfg_soft_wwpn); 2773 } 2774 2775 /** 2776 * lpfc_soft_wwpn_store - Set the ww port name of the adapter 2777 * @dev class device that is converted into a Scsi_host. 2778 * @attr: device attribute, not used. 2779 * @buf: contains the wwpn in hexadecimal. 2780 * @count: number of wwpn bytes in buf 2781 * 2782 * Returns: 2783 * -EACCES hba reset not enabled, adapter over temp 2784 * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid 2785 * -EIO error taking adapter offline or online 2786 * value of count on success 2787 **/ 2788 static ssize_t 2789 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, 2790 const char *buf, size_t count) 2791 { 2792 struct Scsi_Host *shost = class_to_shost(dev); 2793 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2794 struct lpfc_hba *phba = vport->phba; 2795 struct completion online_compl; 2796 int stat1 = 0, stat2 = 0; 2797 unsigned int cnt = count; 2798 u8 wwpn[WWN_SZ]; 2799 int rc; 2800 2801 if (!phba->cfg_enable_hba_reset) 2802 return -EACCES; 2803 spin_lock_irq(&phba->hbalock); 2804 if (phba->over_temp_state == HBA_OVER_TEMP) { 2805 spin_unlock_irq(&phba->hbalock); 2806 return -EACCES; 2807 } 2808 spin_unlock_irq(&phba->hbalock); 2809 /* count may include a LF at end of string */ 2810 if (buf[cnt-1] == '\n') 2811 cnt--; 2812 2813 if (!phba->soft_wwn_enable) 2814 return -EINVAL; 2815 2816 /* lock setting wwpn, wwnn down */ 2817 phba->soft_wwn_enable = 0; 2818 2819 rc = lpfc_wwn_set(buf, cnt, wwpn); 2820 if (rc) { 2821 /* not able to set wwpn, unlock it */ 2822 phba->soft_wwn_enable = 1; 2823 return rc; 2824 } 2825 2826 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 2827 fc_host_port_name(shost) = phba->cfg_soft_wwpn; 2828 if (phba->cfg_soft_wwnn) 2829 fc_host_node_name(shost) = phba->cfg_soft_wwnn; 2830 2831 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 2832 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 2833 2834 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 2835 if (stat1) 2836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2837 "0463 lpfc_soft_wwpn attribute set failed to " 2838 "reinit adapter - %d\n", stat1); 2839 init_completion(&online_compl); 2840 rc = lpfc_workq_post_event(phba, &stat2, &online_compl, 2841 LPFC_EVT_ONLINE); 2842 if (rc == 0) 2843 return -ENOMEM; 2844 2845 wait_for_completion(&online_compl); 2846 if (stat2) 2847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2848 "0464 lpfc_soft_wwpn attribute set failed to " 2849 "reinit adapter - %d\n", stat2); 2850 return (stat1 || stat2) ? -EIO : count; 2851 } 2852 static DEVICE_ATTR_RW(lpfc_soft_wwpn); 2853 2854 /** 2855 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter 2856 * @dev: class device that is converted into a Scsi_host. 2857 * @attr: device attribute, not used. 2858 * @buf: on return contains the wwnn in hexadecimal. 2859 * 2860 * Returns: size of formatted string. 
2861 **/ 2862 static ssize_t 2863 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, 2864 char *buf) 2865 { 2866 struct Scsi_Host *shost = class_to_shost(dev); 2867 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2868 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 2869 (unsigned long long)phba->cfg_soft_wwnn); 2870 } 2871 2872 /** 2873 * lpfc_soft_wwnn_store - sets the ww node name of the adapter 2874 * @cdev: class device that is converted into a Scsi_host. 2875 * @buf: contains the ww node name in hexadecimal. 2876 * @count: number of wwnn bytes in buf. 2877 * 2878 * Returns: 2879 * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid 2880 * value of count on success 2881 **/ 2882 static ssize_t 2883 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, 2884 const char *buf, size_t count) 2885 { 2886 struct Scsi_Host *shost = class_to_shost(dev); 2887 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2888 unsigned int cnt = count; 2889 u8 wwnn[WWN_SZ]; 2890 int rc; 2891 2892 /* count may include a LF at end of string */ 2893 if (buf[cnt-1] == '\n') 2894 cnt--; 2895 2896 if (!phba->soft_wwn_enable) 2897 return -EINVAL; 2898 2899 rc = lpfc_wwn_set(buf, cnt, wwnn); 2900 if (rc) { 2901 /* Allow wwnn to be set many times, as long as the enable 2902 * is set. However, once the wwpn is set, everything locks. 2903 */ 2904 return rc; 2905 } 2906 2907 phba->cfg_soft_wwnn = wwn_to_u64(wwnn); 2908 2909 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 2910 "lpfc%d: soft_wwnn set. Value will take effect upon " 2911 "setting of the soft_wwpn\n", phba->brd_no); 2912 2913 return count; 2914 } 2915 static DEVICE_ATTR_RW(lpfc_soft_wwnn); 2916 2917 /** 2918 * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for 2919 * Optimized Access Storage (OAS) operations. 2920 * @dev: class device that is converted into a Scsi_host. 2921 * @attr: device attribute, not used. 2922 * @buf: buffer for passing information. 2923 * 2924 * Returns: 2925 * value of count 2926 **/ 2927 static ssize_t 2928 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, 2929 char *buf) 2930 { 2931 struct Scsi_Host *shost = class_to_shost(dev); 2932 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2933 2934 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 2935 wwn_to_u64(phba->cfg_oas_tgt_wwpn)); 2936 } 2937 2938 /** 2939 * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for 2940 * Optimized Access Storage (OAS) operations. 2941 * @dev: class device that is converted into a Scsi_host. 2942 * @attr: device attribute, not used. 2943 * @buf: buffer for passing information. 2944 * @count: Size of the data buffer. 
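 *
 * Example (illustrative path and WWPN value):
 *     echo "0x10000090fa8d1234" > /sys/class/scsi_host/host0/lpfc_xlane_tgt
 * Writing a WWPN of 0 selects any target (OAS_FIND_ANY_TARGET).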
 *
 * Returns:
 * -EINVAL count is invalid, invalid wwpn byte invalid
 * -EPERM oas is not supported by hba
 * value of count on success
 **/
static ssize_t
lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	unsigned int cnt = count;
	uint8_t wwpn[WWN_SZ];
	int rc;

	if (!phba->cfg_fof)
		return -EPERM;

	/* count may include a LF at end of string */
	if (buf[cnt-1] == '\n')
		cnt--;

	rc = lpfc_wwn_set(buf, cnt, wwpn);
	if (rc)
		return rc;

	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
	if (wwn_to_u64(wwpn) == 0)
		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
	else
		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
	return count;
}
static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);

/**
 * lpfc_oas_priority_show - Return the priority used for Optimized Access
 * Storage (OAS) operations.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: buffer for passing information.
 *
 * Returns:
 * size of formatted string.
 **/
static ssize_t
lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
}

/**
 * lpfc_oas_priority_store - Store the priority to be used for Optimized
 * Access Storage (OAS) operations.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: buffer for passing information.
 * @count: Size of the data buffer.
 *
 * Returns:
 * -EINVAL the priority could not be parsed or is greater than 0x7f
 * -EPERM oas is not supported by hba
 * value of count on success
 **/
static ssize_t
lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	unsigned int cnt = count;
	unsigned long val;
	int ret;

	if (!phba->cfg_fof)
		return -EPERM;

	/* count may include a LF at end of string */
	if (buf[cnt-1] == '\n')
		cnt--;

	ret = kstrtoul(buf, 0, &val);
	if (ret || (val > 0x7f))
		return -EINVAL;

	if (val)
		phba->cfg_oas_priority = (uint8_t)val;
	else
		phba->cfg_oas_priority = phba->cfg_XLanePriority;
	return count;
}
static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
		   lpfc_oas_priority_show, lpfc_oas_priority_store);

/**
 * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
 * for Optimized Access Storage (OAS) operations.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
3053 * @buf: buffer for passing information. 3054 * 3055 * Returns: 3056 * value of count on success 3057 **/ 3058 static ssize_t 3059 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, 3060 char *buf) 3061 { 3062 struct Scsi_Host *shost = class_to_shost(dev); 3063 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3064 3065 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", 3066 wwn_to_u64(phba->cfg_oas_vpt_wwpn)); 3067 } 3068 3069 /** 3070 * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled 3071 * for Optimized Access Storage (OAS) operations. 3072 * @dev: class device that is converted into a Scsi_host. 3073 * @attr: device attribute, not used. 3074 * @buf: buffer for passing information. 3075 * @count: Size of the data buffer. 3076 * 3077 * Returns: 3078 * -EINVAL count is invalid, invalid wwpn byte invalid 3079 * -EPERM oas is not supported by hba 3080 * value of count on success 3081 **/ 3082 static ssize_t 3083 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, 3084 const char *buf, size_t count) 3085 { 3086 struct Scsi_Host *shost = class_to_shost(dev); 3087 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3088 unsigned int cnt = count; 3089 uint8_t wwpn[WWN_SZ]; 3090 int rc; 3091 3092 if (!phba->cfg_fof) 3093 return -EPERM; 3094 3095 /* count may include a LF at end of string */ 3096 if (buf[cnt-1] == '\n') 3097 cnt--; 3098 3099 rc = lpfc_wwn_set(buf, cnt, wwpn); 3100 if (rc) 3101 return rc; 3102 3103 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3104 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); 3105 if (wwn_to_u64(wwpn) == 0) 3106 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; 3107 else 3108 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; 3109 phba->cfg_oas_flags &= ~OAS_LUN_VALID; 3110 if (phba->cfg_oas_priority == 0) 3111 phba->cfg_oas_priority = phba->cfg_XLanePriority; 3112 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; 3113 return count; 3114 } 3115 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, 3116 lpfc_oas_vpt_show, lpfc_oas_vpt_store); 3117 3118 /** 3119 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) 3120 * of whether luns will be enabled or disabled 3121 * for Optimized Access Storage (OAS) operations. 3122 * @dev: class device that is converted into a Scsi_host. 3123 * @attr: device attribute, not used. 3124 * @buf: buffer for passing information. 3125 * 3126 * Returns: 3127 * size of formatted string. 3128 **/ 3129 static ssize_t 3130 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, 3131 char *buf) 3132 { 3133 struct Scsi_Host *shost = class_to_shost(dev); 3134 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3135 3136 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); 3137 } 3138 3139 /** 3140 * lpfc_oas_lun_state_store - Store the state (enabled or disabled) 3141 * of whether luns will be enabled or disabled 3142 * for Optimized Access Storage (OAS) operations. 3143 * @dev: class device that is converted into a Scsi_host. 3144 * @attr: device attribute, not used. 3145 * @buf: buffer for passing information. 3146 * @count: Size of the data buffer. 
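 *
 * A full enable sequence typically writes the vport and target WWPNs,
 * then the state, then the lun, e.g. (illustrative values and paths):
 *     echo "0x10000090fa8d0001" > .../lpfc_xlane_vpt
 *     echo "0x10000090fa8d1234" > .../lpfc_xlane_tgt
 *     echo 1 > .../lpfc_xlane_lun_state
 *     echo "0x0" > .../lpfc_xlane_lun
 * Only 0 (disable) and 1 (enable) are accepted by this attribute.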
3147 * 3148 * Returns: 3149 * -EINVAL count is invalid, invalid wwpn byte invalid 3150 * -EPERM oas is not supported by hba 3151 * value of count on success 3152 **/ 3153 static ssize_t 3154 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, 3155 const char *buf, size_t count) 3156 { 3157 struct Scsi_Host *shost = class_to_shost(dev); 3158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3159 int val = 0; 3160 3161 if (!phba->cfg_fof) 3162 return -EPERM; 3163 3164 if (!isdigit(buf[0])) 3165 return -EINVAL; 3166 3167 if (sscanf(buf, "%i", &val) != 1) 3168 return -EINVAL; 3169 3170 if ((val != 0) && (val != 1)) 3171 return -EINVAL; 3172 3173 phba->cfg_oas_lun_state = val; 3174 return strlen(buf); 3175 } 3176 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, 3177 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); 3178 3179 /** 3180 * lpfc_oas_lun_status_show - Return the status of the Optimized Access 3181 * Storage (OAS) lun returned by the 3182 * lpfc_oas_lun_show function. 3183 * @dev: class device that is converted into a Scsi_host. 3184 * @attr: device attribute, not used. 3185 * @buf: buffer for passing information. 3186 * 3187 * Returns: 3188 * size of formatted string. 3189 **/ 3190 static ssize_t 3191 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, 3192 char *buf) 3193 { 3194 struct Scsi_Host *shost = class_to_shost(dev); 3195 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3196 3197 if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) 3198 return -EFAULT; 3199 3200 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); 3201 } 3202 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, 3203 lpfc_oas_lun_status_show, NULL); 3204 3205 3206 /** 3207 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage 3208 * (OAS) operations. 3209 * @phba: lpfc_hba pointer. 3210 * @ndlp: pointer to fcp target node. 3211 * @lun: the fc lun for setting oas state. 3212 * @oas_state: the oas state to be set to the lun. 3213 * 3214 * Returns: 3215 * SUCCESS : 0 3216 * -EPERM OAS is not enabled or not supported by this port. 3217 * 3218 */ 3219 static size_t 3220 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3221 uint8_t tgt_wwpn[], uint64_t lun, 3222 uint32_t oas_state, uint8_t pri) 3223 { 3224 3225 int rc = 0; 3226 3227 if (!phba->cfg_fof) 3228 return -EPERM; 3229 3230 if (oas_state) { 3231 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3232 (struct lpfc_name *)tgt_wwpn, 3233 lun, pri)) 3234 rc = -ENOMEM; 3235 } else { 3236 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, 3237 (struct lpfc_name *)tgt_wwpn, lun, pri); 3238 } 3239 return rc; 3240 3241 } 3242 3243 /** 3244 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized 3245 * Access Storage (OAS) operations. 3246 * @phba: lpfc_hba pointer. 3247 * @vpt_wwpn: wwpn of the vport associated with the returned lun 3248 * @tgt_wwpn: wwpn of the target associated with the returned lun 3249 * @lun_status: status of the lun returned lun 3250 * 3251 * Returns the first or next lun enabled for OAS operations for the vport/target 3252 * specified. If a lun is found, its vport wwpn, target wwpn and status is 3253 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. 3254 * 3255 * Return: 3256 * lun that is OAS enabled for the vport/target 3257 * NOT_OAS_ENABLED_LUN when no oas enabled lun found. 
3258 */ 3259 static uint64_t 3260 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3261 uint8_t tgt_wwpn[], uint32_t *lun_status, 3262 uint32_t *lun_pri) 3263 { 3264 uint64_t found_lun; 3265 3266 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) 3267 return NOT_OAS_ENABLED_LUN; 3268 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) 3269 phba->sli4_hba.oas_next_vpt_wwpn, 3270 (struct lpfc_name *) 3271 phba->sli4_hba.oas_next_tgt_wwpn, 3272 &phba->sli4_hba.oas_next_lun, 3273 (struct lpfc_name *)vpt_wwpn, 3274 (struct lpfc_name *)tgt_wwpn, 3275 &found_lun, lun_status, lun_pri)) 3276 return found_lun; 3277 else 3278 return NOT_OAS_ENABLED_LUN; 3279 } 3280 3281 /** 3282 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations 3283 * @phba: lpfc_hba pointer. 3284 * @vpt_wwpn: vport wwpn by reference. 3285 * @tgt_wwpn: target wwpn by reference. 3286 * @lun: the fc lun for setting oas state. 3287 * @oas_state: the oas state to be set to the oas_lun. 3288 * 3289 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) 3290 * a lun for OAS operations. 3291 * 3292 * Return: 3293 * SUCCESS: 0 3294 * -ENOMEM: failed to enable an lun for OAS operations 3295 * -EPERM: OAS is not enabled 3296 */ 3297 static ssize_t 3298 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], 3299 uint8_t tgt_wwpn[], uint64_t lun, 3300 uint32_t oas_state, uint8_t pri) 3301 { 3302 3303 int rc; 3304 3305 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, 3306 oas_state, pri); 3307 return rc; 3308 } 3309 3310 /** 3311 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target 3312 * @dev: class device that is converted into a Scsi_host. 3313 * @attr: device attribute, not used. 3314 * @buf: buffer for passing information. 3315 * 3316 * This routine returns a lun enabled for OAS each time the function 3317 * is called. 3318 * 3319 * Returns: 3320 * SUCCESS: size of formatted string. 3321 * -EFAULT: target or vport wwpn was not set properly. 3322 * -EPERM: oas is not enabled. 3323 **/ 3324 static ssize_t 3325 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, 3326 char *buf) 3327 { 3328 struct Scsi_Host *shost = class_to_shost(dev); 3329 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3330 3331 uint64_t oas_lun; 3332 int len = 0; 3333 3334 if (!phba->cfg_fof) 3335 return -EPERM; 3336 3337 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3338 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) 3339 return -EFAULT; 3340 3341 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3342 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) 3343 return -EFAULT; 3344 3345 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, 3346 phba->cfg_oas_tgt_wwpn, 3347 &phba->cfg_oas_lun_status, 3348 &phba->cfg_oas_priority); 3349 if (oas_lun != NOT_OAS_ENABLED_LUN) 3350 phba->cfg_oas_flags |= OAS_LUN_VALID; 3351 3352 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); 3353 3354 return len; 3355 } 3356 3357 /** 3358 * lpfc_oas_lun_store - Sets the OAS state for lun 3359 * @dev: class device that is converted into a Scsi_host. 3360 * @attr: device attribute, not used. 3361 * @buf: buffer for passing information. 3362 * 3363 * This function sets the OAS state for lun. Before this function is called, 3364 * the vport wwpn, target wwpn, and oas state need to be set. 3365 * 3366 * Returns: 3367 * SUCCESS: size of formatted string. 3368 * -EFAULT: target or vport wwpn was not set properly. 3369 * -EPERM: oas is not enabled. 
3370 * size of formatted string. 3371 **/ 3372 static ssize_t 3373 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, 3374 const char *buf, size_t count) 3375 { 3376 struct Scsi_Host *shost = class_to_shost(dev); 3377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3378 uint64_t scsi_lun; 3379 uint32_t pri; 3380 ssize_t rc; 3381 3382 if (!phba->cfg_fof) 3383 return -EPERM; 3384 3385 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 3386 return -EFAULT; 3387 3388 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) 3389 return -EFAULT; 3390 3391 if (!isdigit(buf[0])) 3392 return -EINVAL; 3393 3394 if (sscanf(buf, "0x%llx", &scsi_lun) != 1) 3395 return -EINVAL; 3396 3397 pri = phba->cfg_oas_priority; 3398 if (pri == 0) 3399 pri = phba->cfg_XLanePriority; 3400 3401 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3402 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " 3403 "priority 0x%x with oas state %d\n", 3404 wwn_to_u64(phba->cfg_oas_vpt_wwpn), 3405 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, 3406 pri, phba->cfg_oas_lun_state); 3407 3408 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, 3409 phba->cfg_oas_tgt_wwpn, scsi_lun, 3410 phba->cfg_oas_lun_state, pri); 3411 if (rc) 3412 return rc; 3413 3414 return count; 3415 } 3416 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, 3417 lpfc_oas_lun_show, lpfc_oas_lun_store); 3418 3419 int lpfc_enable_nvmet_cnt; 3420 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { 3421 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3422 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3423 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); 3424 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); 3425 3426 static int lpfc_poll = 0; 3427 module_param(lpfc_poll, int, S_IRUGO); 3428 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" 3429 " 0 - none," 3430 " 1 - poll with interrupts enabled" 3431 " 3 - poll and disable FCP ring interrupts"); 3432 3433 static DEVICE_ATTR_RW(lpfc_poll); 3434 3435 int lpfc_no_hba_reset_cnt; 3436 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { 3437 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 3438 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); 3439 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); 3440 3441 LPFC_ATTR(sli_mode, 0, 0, 3, 3442 "SLI mode selector:" 3443 " 0 - auto (SLI-3 if supported)," 3444 " 2 - select SLI-2 even on SLI-3 capable HBAs," 3445 " 3 - select SLI-3"); 3446 3447 LPFC_ATTR_R(enable_npiv, 1, 0, 1, 3448 "Enable NPIV functionality"); 3449 3450 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, 3451 "FCF Fast failover=1 Priority failover=2"); 3452 3453 /* 3454 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures 3455 # 0x0 = disabled, XRI/OXID use not tracked. 3456 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. 3457 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. 3458 */ 3459 LPFC_ATTR_R(enable_rrq, 2, 0, 2, 3460 "Enable RRQ functionality"); 3461 3462 /* 3463 # lpfc_suppress_link_up: Bring link up at initialization 3464 # 0x0 = bring link up (issue MBX_INIT_LINK) 3465 # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) 3466 # 0x2 = never bring up link 3467 # Default value is 0. 
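# Example (illustrative, assuming the LPFC_ATTR_R wrapper also registers a
# module parameter of the same name): "modprobe lpfc lpfc_suppress_link_up=1"
# defers bringing the link up at driver initialization.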
3468 */ 3469 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 3470 LPFC_DELAY_INIT_LINK_INDEFINITELY, 3471 "Suppress Link Up at initialization"); 3472 3473 static ssize_t 3474 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) 3475 { 3476 struct Scsi_Host *shost = class_to_shost(dev); 3477 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3478 3479 return scnprintf(buf, PAGE_SIZE, "%d\n", 3480 phba->sli4_hba.pc_sli4_params.pls); 3481 } 3482 static DEVICE_ATTR(pls, 0444, 3483 lpfc_pls_show, NULL); 3484 3485 static ssize_t 3486 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) 3487 { 3488 struct Scsi_Host *shost = class_to_shost(dev); 3489 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3490 3491 return scnprintf(buf, PAGE_SIZE, "%d\n", 3492 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); 3493 } 3494 static DEVICE_ATTR(pt, 0444, 3495 lpfc_pt_show, NULL); 3496 3497 /* 3498 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS 3499 # 1 - (1024) 3500 # 2 - (2048) 3501 # 3 - (3072) 3502 # 4 - (4096) 3503 # 5 - (5120) 3504 */ 3505 static ssize_t 3506 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3507 { 3508 struct Scsi_Host *shost = class_to_shost(dev); 3509 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3510 3511 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); 3512 } 3513 3514 static DEVICE_ATTR(iocb_hw, S_IRUGO, 3515 lpfc_iocb_hw_show, NULL); 3516 static ssize_t 3517 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) 3518 { 3519 struct Scsi_Host *shost = class_to_shost(dev); 3520 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3521 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3522 3523 return scnprintf(buf, PAGE_SIZE, "%d\n", 3524 pring ? pring->txq_max : 0); 3525 } 3526 3527 static DEVICE_ATTR(txq_hw, S_IRUGO, 3528 lpfc_txq_hw_show, NULL); 3529 static ssize_t 3530 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, 3531 char *buf) 3532 { 3533 struct Scsi_Host *shost = class_to_shost(dev); 3534 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3535 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3536 3537 return scnprintf(buf, PAGE_SIZE, "%d\n", 3538 pring ? pring->txcmplq_max : 0); 3539 } 3540 3541 static DEVICE_ATTR(txcmplq_hw, S_IRUGO, 3542 lpfc_txcmplq_hw_show, NULL); 3543 3544 /* 3545 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 3546 # until the timer expires. Value range is [0,255]. Default value is 30. 3547 */ 3548 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3549 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; 3550 module_param(lpfc_nodev_tmo, int, 0); 3551 MODULE_PARM_DESC(lpfc_nodev_tmo, 3552 "Seconds driver will hold I/O waiting " 3553 "for a device to come back"); 3554 3555 /** 3556 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value 3557 * @dev: class converted to a Scsi_host structure. 3558 * @attr: device attribute, not used. 3559 * @buf: on return contains the dev loss timeout in decimal. 3560 * 3561 * Returns: size of formatted string. 
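 *
 * Example read (illustrative host instance):
 *     cat /sys/class/scsi_host/host0/lpfc_nodev_tmo
 * reports the effective devloss timeout in seconds.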
3562 **/ 3563 static ssize_t 3564 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, 3565 char *buf) 3566 { 3567 struct Scsi_Host *shost = class_to_shost(dev); 3568 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3569 3570 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 3571 } 3572 3573 /** 3574 * lpfc_nodev_tmo_init - Set the hba nodev timeout value 3575 * @vport: lpfc vport structure pointer. 3576 * @val: contains the nodev timeout value. 3577 * 3578 * Description: 3579 * If the devloss tmo is already set then nodev tmo is set to devloss tmo, 3580 * a kernel error message is printed and zero is returned. 3581 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3582 * Otherwise nodev tmo is set to the default value. 3583 * 3584 * Returns: 3585 * zero if already set or if val is in range 3586 * -EINVAL val out of range 3587 **/ 3588 static int 3589 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) 3590 { 3591 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { 3592 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; 3593 if (val != LPFC_DEF_DEVLOSS_TMO) 3594 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3595 "0407 Ignoring lpfc_nodev_tmo module " 3596 "parameter because lpfc_devloss_tmo " 3597 "is set.\n"); 3598 return 0; 3599 } 3600 3601 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3602 vport->cfg_nodev_tmo = val; 3603 vport->cfg_devloss_tmo = val; 3604 return 0; 3605 } 3606 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3607 "0400 lpfc_nodev_tmo attribute cannot be set to" 3608 " %d, allowed range is [%d, %d]\n", 3609 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3610 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; 3611 return -EINVAL; 3612 } 3613 3614 /** 3615 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value 3616 * @vport: lpfc vport structure pointer. 3617 * 3618 * Description: 3619 * Update all the ndlp's dev loss tmo with the vport devloss tmo value. 3620 **/ 3621 static void 3622 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) 3623 { 3624 struct Scsi_Host *shost; 3625 struct lpfc_nodelist *ndlp; 3626 #if (IS_ENABLED(CONFIG_NVME_FC)) 3627 struct lpfc_nvme_rport *rport; 3628 struct nvme_fc_remote_port *remoteport = NULL; 3629 #endif 3630 3631 shost = lpfc_shost_from_vport(vport); 3632 spin_lock_irq(shost->host_lock); 3633 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3634 if (!NLP_CHK_NODE_ACT(ndlp)) 3635 continue; 3636 if (ndlp->rport) 3637 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3638 #if (IS_ENABLED(CONFIG_NVME_FC)) 3639 spin_lock(&vport->phba->hbalock); 3640 rport = lpfc_ndlp_get_nrport(ndlp); 3641 if (rport) 3642 remoteport = rport->remoteport; 3643 spin_unlock(&vport->phba->hbalock); 3644 if (rport && remoteport) 3645 nvme_fc_set_remoteport_devloss(remoteport, 3646 vport->cfg_devloss_tmo); 3647 #endif 3648 } 3649 spin_unlock_irq(shost->host_lock); 3650 } 3651 3652 /** 3653 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values 3654 * @vport: lpfc vport structure pointer. 3655 * @val: contains the tmo value. 3656 * 3657 * Description: 3658 * If the devloss tmo is already set or the vport dev loss tmo has changed 3659 * then a kernel error message is printed and zero is returned. 3660 * Else if val is in range then nodev tmo and devloss tmo are set to val. 3661 * Otherwise nodev tmo is set to the default value. 
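 *
 * A successful update also propagates the new timeout to the FC transport
 * host (fc_host_dev_loss_tmo) and to every existing remote port through
 * lpfc_update_rport_devloss_tmo(). For example (sysfs path assumed),
 * "echo 60 > /sys/class/scsi_host/hostN/lpfc_nodev_tmo" sets both
 * cfg_nodev_tmo and cfg_devloss_tmo to 60 seconds.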
3662 * 3663 * Returns: 3664 * zero if already set or if val is in range 3665 * -EINVAL val out of range 3666 **/ 3667 static int 3668 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) 3669 { 3670 if (vport->dev_loss_tmo_changed || 3671 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { 3672 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3673 "0401 Ignoring change to lpfc_nodev_tmo " 3674 "because lpfc_devloss_tmo is set.\n"); 3675 return 0; 3676 } 3677 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 3678 vport->cfg_nodev_tmo = val; 3679 vport->cfg_devloss_tmo = val; 3680 /* 3681 * For compat: set the fc_host dev loss so new rports 3682 * will get the value. 3683 */ 3684 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; 3685 lpfc_update_rport_devloss_tmo(vport); 3686 return 0; 3687 } 3688 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3689 "0403 lpfc_nodev_tmo attribute cannot be set to " 3690 "%d, allowed range is [%d, %d]\n", 3691 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); 3692 return -EINVAL; 3693 } 3694 3695 lpfc_vport_param_store(nodev_tmo) 3696 3697 static DEVICE_ATTR_RW(lpfc_nodev_tmo); 3698 3699 /* 3700 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that 3701 # disappear until the timer expires. Value range is [0,255]. Default 3702 # value is 30. 3703 */ 3704 module_param(lpfc_devloss_tmo, int, S_IRUGO); 3705 MODULE_PARM_DESC(lpfc_devloss_tmo, 3706 "Seconds driver will hold I/O waiting " 3707 "for a device to come back"); 3708 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, 3709 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) 3710 lpfc_vport_param_show(devloss_tmo) 3711 3712 /** 3713 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit 3714 * @vport: lpfc vport structure pointer. 3715 * @val: contains the tmo value. 3716 * 3717 * Description: 3718 * If val is in a valid range then set the vport nodev tmo, 3719 * devloss tmo, also set the vport dev loss tmo changed flag. 3720 * Else a kernel error message is printed. 
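 *
 * Note: setting the changed flag (dev_loss_tmo_changed) causes later writes
 * to lpfc_nodev_tmo to be ignored; see lpfc_nodev_tmo_set() above.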
3721 *
3722 * Returns:
3723 * zero if val is in range
3724 * -EINVAL val out of range
3725 **/
3726 static int
3727 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3728 {
3729 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3730 vport->cfg_nodev_tmo = val;
3731 vport->cfg_devloss_tmo = val;
3732 vport->dev_loss_tmo_changed = 1;
3733 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3734 lpfc_update_rport_devloss_tmo(vport);
3735 return 0;
3736 }
3737
3738 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3739 "0404 lpfc_devloss_tmo attribute cannot be set to "
3740 "%d, allowed range is [%d, %d]\n",
3741 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3742 return -EINVAL;
3743 }
3744
3745 lpfc_vport_param_store(devloss_tmo)
3746 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3747
3748 /*
3749 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3750 * lpfc_suppress_rsp = 0 Disable
3751 * lpfc_suppress_rsp = 1 Enable (default)
3752 *
3753 */
3754 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3755 "Enable suppress rsp feature if firmware supports it");
3756
3757 /*
3758 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3759 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3760 * lpfc_nvmet_mrq = 1 use a single RQ pair
3761 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3762 *
3763 */
3764 LPFC_ATTR_R(nvmet_mrq,
3765 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3766 "Specify number of RQ pairs for processing NVMET cmds");
3767
3768 /*
3769 * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
3770 * to each NVMET RQ. Range 64 to 2048, default is 512.
3771 */
3772 LPFC_ATTR_R(nvmet_mrq_post,
3773 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3774 LPFC_NVMET_RQE_DEF_COUNT,
3775 "Specify number of RQ buffers to initially post");
3776
3777 /*
3778 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3779 * Supported Values: 1 - register just FCP
3780 * 3 - register both FCP and NVME
3781 * Supported values are [1,3]. Default value is 3
3782 */
3783 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3784 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3785 "Enable FC4 Protocol support - FCP / NVME");
3786
3787 /*
3788 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3789 # deluged with LOTS of information.
3790 # You can set a bit mask to record specific types of verbose messages:
3791 # See lpfc_logmsg.h for definitions.
3792 */
3793 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3794 "Verbose logging bit-mask");
3795
3796 /*
3797 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3798 # objects that have been registered with the nameserver after login.
3799 */
3800 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3801 "Deregister nameserver objects before LOGO");
3802
3803 /*
3804 # lun_queue_depth: This parameter is used to limit the number of outstanding
3805 # commands per FCP LUN.
3806 */
3807 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3808 "Max number of FCP commands we can queue to a specific LUN");
3809
3810 /*
3811 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3812 # commands per target port. Value range is [10,65535]. Default value is 65535.
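#
# Example (illustrative; sysfs path assumed to follow the usual
# /sys/class/scsi_host/hostN layout):
#   echo 128 > /sys/class/scsi_host/host0/lpfc_tgt_queue_depth
# The set routine below walks vport->fc_nodes and applies the new depth to
# each ndlp->cmd_qdepth.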
3813 */ 3814 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; 3815 module_param(lpfc_tgt_queue_depth, uint, 0444); 3816 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); 3817 lpfc_vport_param_show(tgt_queue_depth); 3818 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, 3819 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH); 3820 3821 /** 3822 * lpfc_tgt_queue_depth_store: Sets an attribute value. 3823 * @phba: pointer the the adapter structure. 3824 * @val: integer attribute value. 3825 * 3826 * Description: Sets the parameter to the new value. 3827 * 3828 * Returns: 3829 * zero on success 3830 * -EINVAL if val is invalid 3831 */ 3832 static int 3833 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val) 3834 { 3835 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3836 struct lpfc_nodelist *ndlp; 3837 3838 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH)) 3839 return -EINVAL; 3840 3841 if (val == vport->cfg_tgt_queue_depth) 3842 return 0; 3843 3844 spin_lock_irq(shost->host_lock); 3845 vport->cfg_tgt_queue_depth = val; 3846 3847 /* Next loop thru nodelist and change cmd_qdepth */ 3848 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 3849 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 3850 3851 spin_unlock_irq(shost->host_lock); 3852 return 0; 3853 } 3854 3855 lpfc_vport_param_store(tgt_queue_depth); 3856 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth); 3857 3858 /* 3859 # hba_queue_depth: This parameter is used to limit the number of outstanding 3860 # commands per lpfc HBA. Value range is [32,8192]. If this parameter 3861 # value is greater than the maximum number of exchanges supported by the HBA, 3862 # then maximum number of exchanges supported by the HBA is used to determine 3863 # the hba_queue_depth. 3864 */ 3865 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, 3866 "Max number of FCP commands we can queue to a lpfc HBA"); 3867 3868 /* 3869 # peer_port_login: This parameter allows/prevents logins 3870 # between peer ports hosted on the same physical port. 3871 # When this parameter is set 0 peer ports of same physical port 3872 # are not allowed to login to each other. 3873 # When this parameter is set 1 peer ports of same physical port 3874 # are allowed to login to each other. 3875 # Default value of this parameter is 0. 3876 */ 3877 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, 3878 "Allow peer ports on the same physical port to login to each " 3879 "other."); 3880 3881 /* 3882 # restrict_login: This parameter allows/prevents logins 3883 # between Virtual Ports and remote initiators. 3884 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from 3885 # other initiators and will attempt to PLOGI all remote ports. 3886 # When this parameter is set (1) Virtual Ports will reject PLOGIs from 3887 # remote ports and will not attempt to PLOGI to other initiators. 3888 # This parameter does not restrict to the physical port. 3889 # This parameter does not restrict logins to Fabric resident remote ports. 3890 # Default value of this parameter is 1. 3891 */ 3892 static int lpfc_restrict_login = 1; 3893 module_param(lpfc_restrict_login, int, S_IRUGO); 3894 MODULE_PARM_DESC(lpfc_restrict_login, 3895 "Restrict virtual ports login to remote initiators."); 3896 lpfc_vport_param_show(restrict_login); 3897 3898 /** 3899 * lpfc_restrict_login_init - Set the vport restrict login flag 3900 * @vport: lpfc vport structure pointer. 3901 * @val: contains the restrict login value. 
3902 * 3903 * Description: 3904 * If val is not in a valid range then log a kernel error message and set 3905 * the vport restrict login to one. 3906 * If the port type is physical clear the restrict login flag and return. 3907 * Else set the restrict login flag to val. 3908 * 3909 * Returns: 3910 * zero if val is in range 3911 * -EINVAL val out of range 3912 **/ 3913 static int 3914 lpfc_restrict_login_init(struct lpfc_vport *vport, int val) 3915 { 3916 if (val < 0 || val > 1) { 3917 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3918 "0422 lpfc_restrict_login attribute cannot " 3919 "be set to %d, allowed range is [0, 1]\n", 3920 val); 3921 vport->cfg_restrict_login = 1; 3922 return -EINVAL; 3923 } 3924 if (vport->port_type == LPFC_PHYSICAL_PORT) { 3925 vport->cfg_restrict_login = 0; 3926 return 0; 3927 } 3928 vport->cfg_restrict_login = val; 3929 return 0; 3930 } 3931 3932 /** 3933 * lpfc_restrict_login_set - Set the vport restrict login flag 3934 * @vport: lpfc vport structure pointer. 3935 * @val: contains the restrict login value. 3936 * 3937 * Description: 3938 * If val is not in a valid range then log a kernel error message and set 3939 * the vport restrict login to one. 3940 * If the port type is physical and the val is not zero log a kernel 3941 * error message, clear the restrict login flag and return zero. 3942 * Else set the restrict login flag to val. 3943 * 3944 * Returns: 3945 * zero if val is in range 3946 * -EINVAL val out of range 3947 **/ 3948 static int 3949 lpfc_restrict_login_set(struct lpfc_vport *vport, int val) 3950 { 3951 if (val < 0 || val > 1) { 3952 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3953 "0425 lpfc_restrict_login attribute cannot " 3954 "be set to %d, allowed range is [0, 1]\n", 3955 val); 3956 vport->cfg_restrict_login = 1; 3957 return -EINVAL; 3958 } 3959 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { 3960 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3961 "0468 lpfc_restrict_login must be 0 for " 3962 "Physical ports.\n"); 3963 vport->cfg_restrict_login = 0; 3964 return 0; 3965 } 3966 vport->cfg_restrict_login = val; 3967 return 0; 3968 } 3969 lpfc_vport_param_store(restrict_login); 3970 static DEVICE_ATTR_RW(lpfc_restrict_login); 3971 3972 /* 3973 # Some disk devices have a "select ID" or "select Target" capability. 3974 # From a protocol standpoint "select ID" usually means select the 3975 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative 3976 # annex" which contains a table that maps a "select ID" (a number 3977 # between 0 and 7F) to an ALPA. By default, for compatibility with 3978 # older drivers, the lpfc driver scans this table from low ALPA to high 3979 # ALPA. 3980 # 3981 # Turning on the scan-down variable (on = 1, off = 0) will 3982 # cause the lpfc driver to use an inverted table, effectively 3983 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. 3984 # 3985 # (Note: This "select ID" functionality is a LOOP ONLY characteristic 3986 # and will not work across a fabric. Also this parameter will take 3987 # effect only in the case when ALPA map is not available.) 
3988 */ 3989 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, 3990 "Start scanning for devices from highest ALPA to lowest"); 3991 3992 /* 3993 # lpfc_topology: link topology for init link 3994 # 0x0 = attempt loop mode then point-to-point 3995 # 0x01 = internal loopback mode 3996 # 0x02 = attempt point-to-point mode only 3997 # 0x04 = attempt loop mode only 3998 # 0x06 = attempt point-to-point mode then loop 3999 # Set point-to-point mode if you want to run as an N_Port. 4000 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. 4001 # Default value is 0. 4002 */ 4003 LPFC_ATTR(topology, 0, 0, 6, 4004 "Select Fibre Channel topology"); 4005 4006 /** 4007 * lpfc_topology_set - Set the adapters topology field 4008 * @phba: lpfc_hba pointer. 4009 * @val: topology value. 4010 * 4011 * Description: 4012 * If val is in a valid range then set the adapter's topology field and 4013 * issue a lip; if the lip fails reset the topology to the old value. 4014 * 4015 * If the value is not in range log a kernel error message and return an error. 4016 * 4017 * Returns: 4018 * zero if val is in range and lip okay 4019 * non-zero return value from lpfc_issue_lip() 4020 * -EINVAL val out of range 4021 **/ 4022 static ssize_t 4023 lpfc_topology_store(struct device *dev, struct device_attribute *attr, 4024 const char *buf, size_t count) 4025 { 4026 struct Scsi_Host *shost = class_to_shost(dev); 4027 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4028 struct lpfc_hba *phba = vport->phba; 4029 int val = 0; 4030 int nolip = 0; 4031 const char *val_buf = buf; 4032 int err; 4033 uint32_t prev_val; 4034 4035 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4036 nolip = 1; 4037 val_buf = &buf[strlen("nolip ")]; 4038 } 4039 4040 if (!isdigit(val_buf[0])) 4041 return -EINVAL; 4042 if (sscanf(val_buf, "%i", &val) != 1) 4043 return -EINVAL; 4044 4045 if (val >= 0 && val <= 6) { 4046 prev_val = phba->cfg_topology; 4047 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G && 4048 val == 4) { 4049 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4050 "3113 Loop mode not supported at speed %d\n", 4051 val); 4052 return -EINVAL; 4053 } 4054 /* 4055 * The 'topology' is not a configurable parameter if : 4056 * - persistent topology enabled 4057 * - G7/G6 with no private loop support 4058 */ 4059 4060 if ((phba->hba_flag & HBA_PERSISTENT_TOPO || 4061 (!phba->sli4_hba.pc_sli4_params.pls && 4062 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || 4063 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) && 4064 val == 4) { 4065 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4066 "3114 Loop mode not supported\n"); 4067 return -EINVAL; 4068 } 4069 phba->cfg_topology = val; 4070 if (nolip) 4071 return strlen(buf); 4072 4073 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4074 "3054 lpfc_topology changed from %d to %d\n", 4075 prev_val, val); 4076 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) 4077 phba->fc_topology_changed = 1; 4078 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4079 if (err) { 4080 phba->cfg_topology = prev_val; 4081 return -EINVAL; 4082 } else 4083 return strlen(buf); 4084 } 4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4086 "%d:0467 lpfc_topology attribute cannot be set to %d, " 4087 "allowed range is [0, 6]\n", 4088 phba->brd_no, val); 4089 return -EINVAL; 4090 } 4091 4092 lpfc_param_show(topology) 4093 static DEVICE_ATTR_RW(lpfc_topology); 4094 4095 /** 4096 * lpfc_static_vport_show: Read callback function for 4097 * lpfc_static_vport sysfs file. 
4098 * @dev: Pointer to class device object. 4099 * @attr: device attribute structure. 4100 * @buf: Data buffer. 4101 * 4102 * This function is the read call back function for 4103 * lpfc_static_vport sysfs file. The lpfc_static_vport 4104 * sysfs file report the mageability of the vport. 4105 **/ 4106 static ssize_t 4107 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, 4108 char *buf) 4109 { 4110 struct Scsi_Host *shost = class_to_shost(dev); 4111 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4112 if (vport->vport_flag & STATIC_VPORT) 4113 sprintf(buf, "1\n"); 4114 else 4115 sprintf(buf, "0\n"); 4116 4117 return strlen(buf); 4118 } 4119 4120 /* 4121 * Sysfs attribute to control the statistical data collection. 4122 */ 4123 static DEVICE_ATTR_RO(lpfc_static_vport); 4124 4125 /** 4126 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 4127 * @dev: Pointer to class device. 4128 * @buf: Data buffer. 4129 * @count: Size of the data buffer. 4130 * 4131 * This function get called when a user write to the lpfc_stat_data_ctrl 4132 * sysfs file. This function parse the command written to the sysfs file 4133 * and take appropriate action. These commands are used for controlling 4134 * driver statistical data collection. 4135 * Following are the command this function handles. 4136 * 4137 * setbucket <bucket_type> <base> <step> 4138 * = Set the latency buckets. 4139 * destroybucket = destroy all the buckets. 4140 * start = start data collection 4141 * stop = stop data collection 4142 * reset = reset the collected data 4143 **/ 4144 static ssize_t 4145 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, 4146 const char *buf, size_t count) 4147 { 4148 struct Scsi_Host *shost = class_to_shost(dev); 4149 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4150 struct lpfc_hba *phba = vport->phba; 4151 #define LPFC_MAX_DATA_CTRL_LEN 1024 4152 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN]; 4153 unsigned long i; 4154 char *str_ptr, *token; 4155 struct lpfc_vport **vports; 4156 struct Scsi_Host *v_shost; 4157 char *bucket_type_str, *base_str, *step_str; 4158 unsigned long base, step, bucket_type; 4159 4160 if (!strncmp(buf, "setbucket", strlen("setbucket"))) { 4161 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1)) 4162 return -EINVAL; 4163 4164 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN); 4165 str_ptr = &bucket_data[0]; 4166 /* Ignore this token - this is command token */ 4167 token = strsep(&str_ptr, "\t "); 4168 if (!token) 4169 return -EINVAL; 4170 4171 bucket_type_str = strsep(&str_ptr, "\t "); 4172 if (!bucket_type_str) 4173 return -EINVAL; 4174 4175 if (!strncmp(bucket_type_str, "linear", strlen("linear"))) 4176 bucket_type = LPFC_LINEAR_BUCKET; 4177 else if (!strncmp(bucket_type_str, "power2", strlen("power2"))) 4178 bucket_type = LPFC_POWER2_BUCKET; 4179 else 4180 return -EINVAL; 4181 4182 base_str = strsep(&str_ptr, "\t "); 4183 if (!base_str) 4184 return -EINVAL; 4185 base = simple_strtoul(base_str, NULL, 0); 4186 4187 step_str = strsep(&str_ptr, "\t "); 4188 if (!step_str) 4189 return -EINVAL; 4190 step = simple_strtoul(step_str, NULL, 0); 4191 if (!step) 4192 return -EINVAL; 4193 4194 /* Block the data collection for every vport */ 4195 vports = lpfc_create_vport_work_array(phba); 4196 if (vports == NULL) 4197 return -ENOMEM; 4198 4199 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4200 v_shost = lpfc_shost_from_vport(vports[i]); 4201 
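/*
 * Each vport is blocked under its own host lock while the bucket
 * geometry (type/base/step) is replaced; the second loop below
 * unblocks collection once the new values are in place.
 */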
spin_lock_irq(v_shost->host_lock); 4202 /* Block and reset data collection */ 4203 vports[i]->stat_data_blocked = 1; 4204 if (vports[i]->stat_data_enabled) 4205 lpfc_vport_reset_stat_data(vports[i]); 4206 spin_unlock_irq(v_shost->host_lock); 4207 } 4208 4209 /* Set the bucket attributes */ 4210 phba->bucket_type = bucket_type; 4211 phba->bucket_base = base; 4212 phba->bucket_step = step; 4213 4214 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4215 v_shost = lpfc_shost_from_vport(vports[i]); 4216 4217 /* Unblock data collection */ 4218 spin_lock_irq(v_shost->host_lock); 4219 vports[i]->stat_data_blocked = 0; 4220 spin_unlock_irq(v_shost->host_lock); 4221 } 4222 lpfc_destroy_vport_work_array(phba, vports); 4223 return strlen(buf); 4224 } 4225 4226 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) { 4227 vports = lpfc_create_vport_work_array(phba); 4228 if (vports == NULL) 4229 return -ENOMEM; 4230 4231 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4232 v_shost = lpfc_shost_from_vport(vports[i]); 4233 spin_lock_irq(shost->host_lock); 4234 vports[i]->stat_data_blocked = 1; 4235 lpfc_free_bucket(vport); 4236 vport->stat_data_enabled = 0; 4237 vports[i]->stat_data_blocked = 0; 4238 spin_unlock_irq(shost->host_lock); 4239 } 4240 lpfc_destroy_vport_work_array(phba, vports); 4241 phba->bucket_type = LPFC_NO_BUCKET; 4242 phba->bucket_base = 0; 4243 phba->bucket_step = 0; 4244 return strlen(buf); 4245 } 4246 4247 if (!strncmp(buf, "start", strlen("start"))) { 4248 /* If no buckets configured return error */ 4249 if (phba->bucket_type == LPFC_NO_BUCKET) 4250 return -EINVAL; 4251 spin_lock_irq(shost->host_lock); 4252 if (vport->stat_data_enabled) { 4253 spin_unlock_irq(shost->host_lock); 4254 return strlen(buf); 4255 } 4256 lpfc_alloc_bucket(vport); 4257 vport->stat_data_enabled = 1; 4258 spin_unlock_irq(shost->host_lock); 4259 return strlen(buf); 4260 } 4261 4262 if (!strncmp(buf, "stop", strlen("stop"))) { 4263 spin_lock_irq(shost->host_lock); 4264 if (vport->stat_data_enabled == 0) { 4265 spin_unlock_irq(shost->host_lock); 4266 return strlen(buf); 4267 } 4268 lpfc_free_bucket(vport); 4269 vport->stat_data_enabled = 0; 4270 spin_unlock_irq(shost->host_lock); 4271 return strlen(buf); 4272 } 4273 4274 if (!strncmp(buf, "reset", strlen("reset"))) { 4275 if ((phba->bucket_type == LPFC_NO_BUCKET) 4276 || !vport->stat_data_enabled) 4277 return strlen(buf); 4278 spin_lock_irq(shost->host_lock); 4279 vport->stat_data_blocked = 1; 4280 lpfc_vport_reset_stat_data(vport); 4281 vport->stat_data_blocked = 0; 4282 spin_unlock_irq(shost->host_lock); 4283 return strlen(buf); 4284 } 4285 return -EINVAL; 4286 } 4287 4288 4289 /** 4290 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file 4291 * @dev: Pointer to class device object. 4292 * @buf: Data buffer. 4293 * 4294 * This function is the read call back function for 4295 * lpfc_stat_data_ctrl sysfs file. This function report the 4296 * current statistical data collection state. 
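 *
 * Example of the returned text (values are illustrative):
 *   Statistical Data enabled :1, blocked :0, Bucket type :linear, Bucket base :10, Bucket step :20
 *   Latency Ranges :00000010 00000030 00000050 ...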
4297 **/ 4298 static ssize_t 4299 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, 4300 char *buf) 4301 { 4302 struct Scsi_Host *shost = class_to_shost(dev); 4303 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4304 struct lpfc_hba *phba = vport->phba; 4305 int index = 0; 4306 int i; 4307 char *bucket_type; 4308 unsigned long bucket_value; 4309 4310 switch (phba->bucket_type) { 4311 case LPFC_LINEAR_BUCKET: 4312 bucket_type = "linear"; 4313 break; 4314 case LPFC_POWER2_BUCKET: 4315 bucket_type = "power2"; 4316 break; 4317 default: 4318 bucket_type = "No Bucket"; 4319 break; 4320 } 4321 4322 sprintf(&buf[index], "Statistical Data enabled :%d, " 4323 "blocked :%d, Bucket type :%s, Bucket base :%d," 4324 " Bucket step :%d\nLatency Ranges :", 4325 vport->stat_data_enabled, vport->stat_data_blocked, 4326 bucket_type, phba->bucket_base, phba->bucket_step); 4327 index = strlen(buf); 4328 if (phba->bucket_type != LPFC_NO_BUCKET) { 4329 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 4330 if (phba->bucket_type == LPFC_LINEAR_BUCKET) 4331 bucket_value = phba->bucket_base + 4332 phba->bucket_step * i; 4333 else 4334 bucket_value = phba->bucket_base + 4335 (1 << i) * phba->bucket_step; 4336 4337 if (index + 10 > PAGE_SIZE) 4338 break; 4339 sprintf(&buf[index], "%08ld ", bucket_value); 4340 index = strlen(buf); 4341 } 4342 } 4343 sprintf(&buf[index], "\n"); 4344 return strlen(buf); 4345 } 4346 4347 /* 4348 * Sysfs attribute to control the statistical data collection. 4349 */ 4350 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl); 4351 4352 /* 4353 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. 4354 */ 4355 4356 /* 4357 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN 4358 * for each target. 4359 */ 4360 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18) 4361 #define MAX_STAT_DATA_SIZE_PER_TARGET \ 4362 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT) 4363 4364 4365 /** 4366 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute 4367 * @filp: sysfs file 4368 * @kobj: Pointer to the kernel object 4369 * @bin_attr: Attribute object 4370 * @buff: Buffer pointer 4371 * @off: File offset 4372 * @count: Buffer size 4373 * 4374 * This function is the read call back function for lpfc_drvr_stat_data 4375 * sysfs file. This function export the statistical data to user 4376 * applications. 
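 *
 * Each target is written as one text line: the 16 hex digit WWPN, a ':',
 * then LPFC_MAX_BUCKET_COUNT comma separated 10 digit bucket counters.
 * That fixed record size (MAX_STAT_DATA_SIZE_PER_TARGET) is what allows the
 * file offset to select which target the read starts from.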
4377 **/ 4378 static ssize_t 4379 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj, 4380 struct bin_attribute *bin_attr, 4381 char *buf, loff_t off, size_t count) 4382 { 4383 struct device *dev = container_of(kobj, struct device, 4384 kobj); 4385 struct Scsi_Host *shost = class_to_shost(dev); 4386 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4387 struct lpfc_hba *phba = vport->phba; 4388 int i = 0, index = 0; 4389 unsigned long nport_index; 4390 struct lpfc_nodelist *ndlp = NULL; 4391 nport_index = (unsigned long)off / 4392 MAX_STAT_DATA_SIZE_PER_TARGET; 4393 4394 if (!vport->stat_data_enabled || vport->stat_data_blocked 4395 || (phba->bucket_type == LPFC_NO_BUCKET)) 4396 return 0; 4397 4398 spin_lock_irq(shost->host_lock); 4399 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4400 if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data) 4401 continue; 4402 4403 if (nport_index > 0) { 4404 nport_index--; 4405 continue; 4406 } 4407 4408 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET) 4409 > count) 4410 break; 4411 4412 if (!ndlp->lat_data) 4413 continue; 4414 4415 /* Print the WWN */ 4416 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:", 4417 ndlp->nlp_portname.u.wwn[0], 4418 ndlp->nlp_portname.u.wwn[1], 4419 ndlp->nlp_portname.u.wwn[2], 4420 ndlp->nlp_portname.u.wwn[3], 4421 ndlp->nlp_portname.u.wwn[4], 4422 ndlp->nlp_portname.u.wwn[5], 4423 ndlp->nlp_portname.u.wwn[6], 4424 ndlp->nlp_portname.u.wwn[7]); 4425 4426 index = strlen(buf); 4427 4428 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 4429 sprintf(&buf[index], "%010u,", 4430 ndlp->lat_data[i].cmd_count); 4431 index = strlen(buf); 4432 } 4433 sprintf(&buf[index], "\n"); 4434 index = strlen(buf); 4435 } 4436 spin_unlock_irq(shost->host_lock); 4437 return index; 4438 } 4439 4440 static struct bin_attribute sysfs_drvr_stat_data_attr = { 4441 .attr = { 4442 .name = "lpfc_drvr_stat_data", 4443 .mode = S_IRUSR, 4444 }, 4445 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, 4446 .read = sysfs_drvr_stat_data_read, 4447 .write = NULL, 4448 }; 4449 4450 /* 4451 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel 4452 # connection. 4453 # Value range is [0,16]. Default value is 0. 4454 */ 4455 /** 4456 * lpfc_link_speed_set - Set the adapters link speed 4457 * @phba: lpfc_hba pointer. 4458 * @val: link speed value. 4459 * 4460 * Description: 4461 * If val is in a valid range then set the adapter's link speed field and 4462 * issue a lip; if the lip fails reset the link speed to the old value. 4463 * 4464 * Notes: 4465 * If the value is not in range log a kernel error message and return an error. 4466 * 4467 * Returns: 4468 * zero if val is in range and lip okay. 
4469 * non-zero return value from lpfc_issue_lip() 4470 * -EINVAL val out of range 4471 **/ 4472 static ssize_t 4473 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, 4474 const char *buf, size_t count) 4475 { 4476 struct Scsi_Host *shost = class_to_shost(dev); 4477 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4478 struct lpfc_hba *phba = vport->phba; 4479 int val = LPFC_USER_LINK_SPEED_AUTO; 4480 int nolip = 0; 4481 const char *val_buf = buf; 4482 int err; 4483 uint32_t prev_val, if_type; 4484 4485 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 4486 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && 4487 phba->hba_flag & HBA_FORCED_LINK_SPEED) 4488 return -EPERM; 4489 4490 if (!strncmp(buf, "nolip ", strlen("nolip "))) { 4491 nolip = 1; 4492 val_buf = &buf[strlen("nolip ")]; 4493 } 4494 4495 if (!isdigit(val_buf[0])) 4496 return -EINVAL; 4497 if (sscanf(val_buf, "%i", &val) != 1) 4498 return -EINVAL; 4499 4500 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4501 "3055 lpfc_link_speed changed from %d to %d %s\n", 4502 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); 4503 4504 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 4505 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 4506 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 4507 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || 4508 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || 4509 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || 4510 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || 4511 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { 4512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4513 "2879 lpfc_link_speed attribute cannot be set " 4514 "to %d. Speed is not supported by this port.\n", 4515 val); 4516 return -EINVAL; 4517 } 4518 if (val >= LPFC_USER_LINK_SPEED_16G && 4519 phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4521 "3112 lpfc_link_speed attribute cannot be set " 4522 "to %d. Speed is not supported in loop mode.\n", 4523 val); 4524 return -EINVAL; 4525 } 4526 4527 switch (val) { 4528 case LPFC_USER_LINK_SPEED_AUTO: 4529 case LPFC_USER_LINK_SPEED_1G: 4530 case LPFC_USER_LINK_SPEED_2G: 4531 case LPFC_USER_LINK_SPEED_4G: 4532 case LPFC_USER_LINK_SPEED_8G: 4533 case LPFC_USER_LINK_SPEED_16G: 4534 case LPFC_USER_LINK_SPEED_32G: 4535 case LPFC_USER_LINK_SPEED_64G: 4536 prev_val = phba->cfg_link_speed; 4537 phba->cfg_link_speed = val; 4538 if (nolip) 4539 return strlen(buf); 4540 4541 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 4542 if (err) { 4543 phba->cfg_link_speed = prev_val; 4544 return -EINVAL; 4545 } 4546 return strlen(buf); 4547 default: 4548 break; 4549 } 4550 4551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4552 "0469 lpfc_link_speed attribute cannot be set to %d, " 4553 "allowed values are [%s]\n", 4554 val, LPFC_LINK_SPEED_STRING); 4555 return -EINVAL; 4556 4557 } 4558 4559 static int lpfc_link_speed = 0; 4560 module_param(lpfc_link_speed, int, S_IRUGO); 4561 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); 4562 lpfc_param_show(link_speed) 4563 4564 /** 4565 * lpfc_link_speed_init - Set the adapters link speed 4566 * @phba: lpfc_hba pointer. 4567 * @val: link speed value. 4568 * 4569 * Description: 4570 * If val is in a valid range then set the adapter's link speed field. 
4571 * 4572 * Notes: 4573 * If the value is not in range log a kernel error message, clear the link 4574 * speed and return an error. 4575 * 4576 * Returns: 4577 * zero if val saved. 4578 * -EINVAL val out of range 4579 **/ 4580 static int 4581 lpfc_link_speed_init(struct lpfc_hba *phba, int val) 4582 { 4583 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { 4584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4585 "3111 lpfc_link_speed of %d cannot " 4586 "support loop mode, setting topology to default.\n", 4587 val); 4588 phba->cfg_topology = 0; 4589 } 4590 4591 switch (val) { 4592 case LPFC_USER_LINK_SPEED_AUTO: 4593 case LPFC_USER_LINK_SPEED_1G: 4594 case LPFC_USER_LINK_SPEED_2G: 4595 case LPFC_USER_LINK_SPEED_4G: 4596 case LPFC_USER_LINK_SPEED_8G: 4597 case LPFC_USER_LINK_SPEED_16G: 4598 case LPFC_USER_LINK_SPEED_32G: 4599 case LPFC_USER_LINK_SPEED_64G: 4600 phba->cfg_link_speed = val; 4601 return 0; 4602 default: 4603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4604 "0405 lpfc_link_speed attribute cannot " 4605 "be set to %d, allowed values are " 4606 "["LPFC_LINK_SPEED_STRING"]\n", val); 4607 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 4608 return -EINVAL; 4609 } 4610 } 4611 4612 static DEVICE_ATTR_RW(lpfc_link_speed); 4613 4614 /* 4615 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) 4616 # 0 = aer disabled or not supported 4617 # 1 = aer supported and enabled (default) 4618 # Value range is [0,1]. Default value is 1. 4619 */ 4620 LPFC_ATTR(aer_support, 1, 0, 1, 4621 "Enable PCIe device AER support"); 4622 lpfc_param_show(aer_support) 4623 4624 /** 4625 * lpfc_aer_support_store - Set the adapter for aer support 4626 * 4627 * @dev: class device that is converted into a Scsi_host. 4628 * @attr: device attribute, not used. 4629 * @buf: containing enable or disable aer flag. 4630 * @count: unused variable. 4631 * 4632 * Description: 4633 * If the val is 1 and currently the device's AER capability was not 4634 * enabled, invoke the kernel's enable AER helper routine, trying to 4635 * enable the device's AER capability. If the helper routine enabling 4636 * AER returns success, update the device's cfg_aer_support flag to 4637 * indicate AER is supported by the device; otherwise, if the device 4638 * AER capability is already enabled to support AER, then do nothing. 4639 * 4640 * If the val is 0 and currently the device's AER support was enabled, 4641 * invoke the kernel's disable AER helper routine. After that, update 4642 * the device's cfg_aer_support flag to indicate AER is not supported 4643 * by the device; otherwise, if the device AER capability is already 4644 * disabled from supporting AER, then do nothing. 4645 * 4646 * Returns: 4647 * length of the buf on success if val is in range the intended mode 4648 * is supported. 4649 * -EINVAL if val out of range or intended mode is not supported. 
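 * -EPERM if the PCI core fails to enable or disable AER on the device.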
4650 **/ 4651 static ssize_t 4652 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, 4653 const char *buf, size_t count) 4654 { 4655 struct Scsi_Host *shost = class_to_shost(dev); 4656 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4657 struct lpfc_hba *phba = vport->phba; 4658 int val = 0, rc = -EINVAL; 4659 4660 if (!isdigit(buf[0])) 4661 return -EINVAL; 4662 if (sscanf(buf, "%i", &val) != 1) 4663 return -EINVAL; 4664 4665 switch (val) { 4666 case 0: 4667 if (phba->hba_flag & HBA_AER_ENABLED) { 4668 rc = pci_disable_pcie_error_reporting(phba->pcidev); 4669 if (!rc) { 4670 spin_lock_irq(&phba->hbalock); 4671 phba->hba_flag &= ~HBA_AER_ENABLED; 4672 spin_unlock_irq(&phba->hbalock); 4673 phba->cfg_aer_support = 0; 4674 rc = strlen(buf); 4675 } else 4676 rc = -EPERM; 4677 } else { 4678 phba->cfg_aer_support = 0; 4679 rc = strlen(buf); 4680 } 4681 break; 4682 case 1: 4683 if (!(phba->hba_flag & HBA_AER_ENABLED)) { 4684 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4685 if (!rc) { 4686 spin_lock_irq(&phba->hbalock); 4687 phba->hba_flag |= HBA_AER_ENABLED; 4688 spin_unlock_irq(&phba->hbalock); 4689 phba->cfg_aer_support = 1; 4690 rc = strlen(buf); 4691 } else 4692 rc = -EPERM; 4693 } else { 4694 phba->cfg_aer_support = 1; 4695 rc = strlen(buf); 4696 } 4697 break; 4698 default: 4699 rc = -EINVAL; 4700 break; 4701 } 4702 return rc; 4703 } 4704 4705 static DEVICE_ATTR_RW(lpfc_aer_support); 4706 4707 /** 4708 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device 4709 * @dev: class device that is converted into a Scsi_host. 4710 * @attr: device attribute, not used. 4711 * @buf: containing flag 1 for aer cleanup state. 4712 * @count: unused variable. 4713 * 4714 * Description: 4715 * If the @buf contains 1 and the device currently has the AER support 4716 * enabled, then invokes the kernel AER helper routine 4717 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable 4718 * error status register. 4719 * 4720 * Notes: 4721 * 4722 * Returns: 4723 * -EINVAL if the buf does not contain the 1 or the device is not currently 4724 * enabled with the AER support. 4725 **/ 4726 static ssize_t 4727 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, 4728 const char *buf, size_t count) 4729 { 4730 struct Scsi_Host *shost = class_to_shost(dev); 4731 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4732 struct lpfc_hba *phba = vport->phba; 4733 int val, rc = -1; 4734 4735 if (!isdigit(buf[0])) 4736 return -EINVAL; 4737 if (sscanf(buf, "%i", &val) != 1) 4738 return -EINVAL; 4739 if (val != 1) 4740 return -EINVAL; 4741 4742 if (phba->hba_flag & HBA_AER_ENABLED) 4743 rc = pci_aer_clear_nonfatal_status(phba->pcidev); 4744 4745 if (rc == 0) 4746 return strlen(buf); 4747 else 4748 return -EPERM; 4749 } 4750 4751 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, 4752 lpfc_aer_cleanup_state); 4753 4754 /** 4755 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions 4756 * 4757 * @dev: class device that is converted into a Scsi_host. 4758 * @attr: device attribute, not used. 4759 * @buf: containing the string the number of vfs to be enabled. 4760 * @count: unused variable. 
4761 * 4762 * Description: 4763 * When this api is called either through user sysfs, the driver shall 4764 * try to enable or disable SR-IOV virtual functions according to the 4765 * following: 4766 * 4767 * If zero virtual function has been enabled to the physical function, 4768 * the driver shall invoke the pci enable virtual function api trying 4769 * to enable the virtual functions. If the nr_vfn provided is greater 4770 * than the maximum supported, the maximum virtual function number will 4771 * be used for invoking the api; otherwise, the nr_vfn provided shall 4772 * be used for invoking the api. If the api call returned success, the 4773 * actual number of virtual functions enabled will be set to the driver 4774 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver 4775 * cfg_sriov_nr_virtfn remains zero. 4776 * 4777 * If none-zero virtual functions have already been enabled to the 4778 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn, 4779 * -EINVAL will be returned and the driver does nothing; 4780 * 4781 * If the nr_vfn provided is zero and none-zero virtual functions have 4782 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the 4783 * disabling virtual function api shall be invoded to disable all the 4784 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to 4785 * zero. Otherwise, if zero virtual function has been enabled, do 4786 * nothing. 4787 * 4788 * Returns: 4789 * length of the buf on success if val is in range the intended mode 4790 * is supported. 4791 * -EINVAL if val out of range or intended mode is not supported. 4792 **/ 4793 static ssize_t 4794 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr, 4795 const char *buf, size_t count) 4796 { 4797 struct Scsi_Host *shost = class_to_shost(dev); 4798 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4799 struct lpfc_hba *phba = vport->phba; 4800 struct pci_dev *pdev = phba->pcidev; 4801 int val = 0, rc = -EINVAL; 4802 4803 /* Sanity check on user data */ 4804 if (!isdigit(buf[0])) 4805 return -EINVAL; 4806 if (sscanf(buf, "%i", &val) != 1) 4807 return -EINVAL; 4808 if (val < 0) 4809 return -EINVAL; 4810 4811 /* Request disabling virtual functions */ 4812 if (val == 0) { 4813 if (phba->cfg_sriov_nr_virtfn > 0) { 4814 pci_disable_sriov(pdev); 4815 phba->cfg_sriov_nr_virtfn = 0; 4816 } 4817 return strlen(buf); 4818 } 4819 4820 /* Request enabling virtual functions */ 4821 if (phba->cfg_sriov_nr_virtfn > 0) { 4822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4823 "3018 There are %d virtual functions " 4824 "enabled on physical function.\n", 4825 phba->cfg_sriov_nr_virtfn); 4826 return -EEXIST; 4827 } 4828 4829 if (val <= LPFC_MAX_VFN_PER_PFN) 4830 phba->cfg_sriov_nr_virtfn = val; 4831 else { 4832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4833 "3019 Enabling %d virtual functions is not " 4834 "allowed.\n", val); 4835 return -EINVAL; 4836 } 4837 4838 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); 4839 if (rc) { 4840 phba->cfg_sriov_nr_virtfn = 0; 4841 rc = -EPERM; 4842 } else 4843 rc = strlen(buf); 4844 4845 return rc; 4846 } 4847 4848 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN, 4849 "Enable PCIe device SR-IOV virtual fn"); 4850 4851 lpfc_param_show(sriov_nr_virtfn) 4852 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn); 4853 4854 /** 4855 * lpfc_request_firmware_store - Request for Linux generic firmware upgrade 4856 * 4857 * @dev: class device that is converted into a 
Scsi_host. 4858 * @attr: device attribute, not used. 4859 * @buf: containing the string the number of vfs to be enabled. 4860 * @count: unused variable. 4861 * 4862 * Description: 4863 * 4864 * Returns: 4865 * length of the buf on success if val is in range the intended mode 4866 * is supported. 4867 * -EINVAL if val out of range or intended mode is not supported. 4868 **/ 4869 static ssize_t 4870 lpfc_request_firmware_upgrade_store(struct device *dev, 4871 struct device_attribute *attr, 4872 const char *buf, size_t count) 4873 { 4874 struct Scsi_Host *shost = class_to_shost(dev); 4875 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4876 struct lpfc_hba *phba = vport->phba; 4877 int val = 0, rc; 4878 4879 /* Sanity check on user data */ 4880 if (!isdigit(buf[0])) 4881 return -EINVAL; 4882 if (sscanf(buf, "%i", &val) != 1) 4883 return -EINVAL; 4884 if (val != 1) 4885 return -EINVAL; 4886 4887 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE); 4888 if (rc) 4889 rc = -EPERM; 4890 else 4891 rc = strlen(buf); 4892 return rc; 4893 } 4894 4895 static int lpfc_req_fw_upgrade; 4896 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR); 4897 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade"); 4898 lpfc_param_show(request_firmware_upgrade) 4899 4900 /** 4901 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade 4902 * @phba: lpfc_hba pointer. 4903 * @val: 0 or 1. 4904 * 4905 * Description: 4906 * Set the initial Linux generic firmware upgrade enable or disable flag. 4907 * 4908 * Returns: 4909 * zero if val saved. 4910 * -EINVAL val out of range 4911 **/ 4912 static int 4913 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val) 4914 { 4915 if (val >= 0 && val <= 1) { 4916 phba->cfg_request_firmware_upgrade = val; 4917 return 0; 4918 } 4919 return -EINVAL; 4920 } 4921 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR, 4922 lpfc_request_firmware_upgrade_show, 4923 lpfc_request_firmware_upgrade_store); 4924 4925 /** 4926 * lpfc_force_rscn_store 4927 * 4928 * @dev: class device that is converted into a Scsi_host. 4929 * @attr: device attribute, not used. 4930 * @buf: unused string 4931 * @count: unused variable. 4932 * 4933 * Description: 4934 * Force the switch to send a RSCN to all other NPorts in our zone 4935 * If we are direct connect pt2pt, build the RSCN command ourself 4936 * and send to the other NPort. Not supported for private loop. 4937 * 4938 * Returns: 4939 * 0 - on success 4940 * -EIO - if command is not sent 4941 **/ 4942 static ssize_t 4943 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr, 4944 const char *buf, size_t count) 4945 { 4946 struct Scsi_Host *shost = class_to_shost(dev); 4947 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4948 int i; 4949 4950 i = lpfc_issue_els_rscn(vport, 0); 4951 if (i) 4952 return -EIO; 4953 return strlen(buf); 4954 } 4955 4956 /* 4957 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts 4958 * connected to the HBA. 4959 * 4960 * Value range is any ascii value 4961 */ 4962 static int lpfc_force_rscn; 4963 module_param(lpfc_force_rscn, int, 0644); 4964 MODULE_PARM_DESC(lpfc_force_rscn, 4965 "Force an RSCN to be sent to all remote NPorts"); 4966 lpfc_param_show(force_rscn) 4967 4968 /** 4969 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts 4970 * @phba: lpfc_hba pointer. 4971 * @val: unused value. 4972 * 4973 * Returns: 4974 * zero if val saved. 
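 *
 * Note: @val is ignored; lpfc_force_rscn exists only so the standard
 * attribute macros can be used. Writing any value to the sysfs attribute
 * invokes lpfc_issue_els_rscn() via the store routine above.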
4975 **/
4976 static int
4977 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4978 {
4979 return 0;
4980 }
4981 static DEVICE_ATTR_RW(lpfc_force_rscn);
4982
4983 /**
4984 * lpfc_fcp_imax_store - Set the maximum number of fast-path FCP interrupts per second
4985 *
4986 * @dev: class device that is converted into a Scsi_host.
4987 * @attr: device attribute, not used.
4988 * @buf: string with the number of fast-path FCP interrupts per second.
4989 * @count: unused variable.
4990 *
4991 * Description:
4992 * If val is 0 (automatic mode) or in the valid range [5000,5000000], then set
4993 * the adapter's maximum number of fast-path FCP interrupts per second.
4994 *
4995 * Returns:
4996 * length of the buf on success if val is in range and the intended mode
4997 * is supported.
4998 * -EINVAL if val out of range or intended mode is not supported.
4999 **/
5000 static ssize_t
5001 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5002 const char *buf, size_t count)
5003 {
5004 struct Scsi_Host *shost = class_to_shost(dev);
5005 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5006 struct lpfc_hba *phba = vport->phba;
5007 struct lpfc_eq_intr_info *eqi;
5008 uint32_t usdelay;
5009 int val = 0, i;
5010
5011 /* fcp_imax is only valid for SLI4 */
5012 if (phba->sli_rev != LPFC_SLI_REV4)
5013 return -EINVAL;
5014
5015 /* Sanity check on user data */
5016 if (!isdigit(buf[0]))
5017 return -EINVAL;
5018 if (sscanf(buf, "%i", &val) != 1)
5019 return -EINVAL;
5020
5021 /*
5022 * Value range for the HBA is [5000,5000000]
5023 * The value for each EQ depends on how many EQs are configured.
5024 * Allow value == 0
5025 */
5026 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5027 return -EINVAL;
5028
5029 phba->cfg_auto_imax = (val) ? 0 : 1;
5030 if (phba->cfg_fcp_imax && !val) {
5031 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5032 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5033
5034 for_each_present_cpu(i) {
5035 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5036 eqi->icnt = 0;
5037 }
5038 }
5039
5040 phba->cfg_fcp_imax = (uint32_t)val;
5041
5042 if (phba->cfg_fcp_imax)
5043 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5044 else
5045 usdelay = 0;
5046
5047 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5048 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5049 usdelay);
5050
5051 return strlen(buf);
5052 }
5053
5054 /*
5055 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5056 # for the HBA.
5057 #
5058 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5059 */
5060 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5061 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5062 MODULE_PARM_DESC(lpfc_fcp_imax,
5063 "Set the maximum number of FCP interrupts per second per HBA");
5064 lpfc_param_show(fcp_imax)
5065
5066 /**
5067 * lpfc_fcp_imax_init - Set the initial FCP interrupt rate limit
5068 * @phba: lpfc_hba pointer.
5069 * @val: maximum number of fast-path FCP interrupts per second.
5070 *
5071 * Description:
5072 * If val is 0 or in the valid range [5000,5000000], then initialize the
5073 * adapter's maximum number of fast-path FCP interrupts per second.
5074 *
5075 * Returns:
5076 * zero if val saved.
5077 * -EINVAL val out of range 5078 **/ 5079 static int 5080 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) 5081 { 5082 if (phba->sli_rev != LPFC_SLI_REV4) { 5083 phba->cfg_fcp_imax = 0; 5084 return 0; 5085 } 5086 5087 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || 5088 (val == 0)) { 5089 phba->cfg_fcp_imax = val; 5090 return 0; 5091 } 5092 5093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5094 "3016 lpfc_fcp_imax: %d out of range, using default\n", 5095 val); 5096 phba->cfg_fcp_imax = LPFC_DEF_IMAX; 5097 5098 return 0; 5099 } 5100 5101 static DEVICE_ATTR_RW(lpfc_fcp_imax); 5102 5103 /** 5104 * lpfc_cq_max_proc_limit_store 5105 * 5106 * @dev: class device that is converted into a Scsi_host. 5107 * @attr: device attribute, not used. 5108 * @buf: string with the cq max processing limit of cqes 5109 * @count: unused variable. 5110 * 5111 * Description: 5112 * If val is in a valid range, then set value on each cq 5113 * 5114 * Returns: 5115 * The length of the buf: if successful 5116 * -ERANGE: if val is not in the valid range 5117 * -EINVAL: if bad value format or intended mode is not supported. 5118 **/ 5119 static ssize_t 5120 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr, 5121 const char *buf, size_t count) 5122 { 5123 struct Scsi_Host *shost = class_to_shost(dev); 5124 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5125 struct lpfc_hba *phba = vport->phba; 5126 struct lpfc_queue *eq, *cq; 5127 unsigned long val; 5128 int i; 5129 5130 /* cq_max_proc_limit is only valid for SLI4 */ 5131 if (phba->sli_rev != LPFC_SLI_REV4) 5132 return -EINVAL; 5133 5134 /* Sanity check on user data */ 5135 if (!isdigit(buf[0])) 5136 return -EINVAL; 5137 if (kstrtoul(buf, 0, &val)) 5138 return -EINVAL; 5139 5140 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT) 5141 return -ERANGE; 5142 5143 phba->cfg_cq_max_proc_limit = (uint32_t)val; 5144 5145 /* set the values on the cq's */ 5146 for (i = 0; i < phba->cfg_irq_chann; i++) { 5147 /* Get the EQ corresponding to the IRQ vector */ 5148 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 5149 if (!eq) 5150 continue; 5151 5152 list_for_each_entry(cq, &eq->child_list, list) 5153 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 5154 cq->entry_count); 5155 } 5156 5157 return strlen(buf); 5158 } 5159 5160 /* 5161 * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an 5162 * itteration of CQ processing. 5163 */ 5164 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 5165 module_param(lpfc_cq_max_proc_limit, int, 0644); 5166 MODULE_PARM_DESC(lpfc_cq_max_proc_limit, 5167 "Set the maximum number CQEs processed in an iteration of " 5168 "CQ processing"); 5169 lpfc_param_show(cq_max_proc_limit) 5170 5171 /* 5172 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a 5173 * single handler call which should request a polled completion rather 5174 * than re-enabling interrupts. 5175 */ 5176 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, 5177 LPFC_CQ_MIN_THRESHOLD_TO_POLL, 5178 LPFC_CQ_MAX_THRESHOLD_TO_POLL, 5179 "CQE Processing Threshold to enable Polling"); 5180 5181 /** 5182 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit 5183 * @phba: lpfc_hba pointer. 5184 * @val: entry limit 5185 * 5186 * Description: 5187 * If val is in a valid range, then initialize the adapter's maximum 5188 * value. 5189 * 5190 * Returns: 5191 * Always returns 0 for success, even if value not always set to 5192 * requested value. 
If value out of range or not supported, will fall 5193 * back to default. 5194 **/ 5195 static int 5196 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) 5197 { 5198 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; 5199 5200 if (phba->sli_rev != LPFC_SLI_REV4) 5201 return 0; 5202 5203 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { 5204 phba->cfg_cq_max_proc_limit = val; 5205 return 0; 5206 } 5207 5208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5209 "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: " 5210 "%d out of range, using default\n", 5211 phba->cfg_cq_max_proc_limit); 5212 5213 return 0; 5214 } 5215 5216 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); 5217 5218 /** 5219 * lpfc_state_show - Display current driver CPU affinity 5220 * @dev: class converted to a Scsi_host structure. 5221 * @attr: device attribute, not used. 5222 * @buf: on return contains text describing the state of the link. 5223 * 5224 * Returns: size of formatted string. 5225 **/ 5226 static ssize_t 5227 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, 5228 char *buf) 5229 { 5230 struct Scsi_Host *shost = class_to_shost(dev); 5231 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5232 struct lpfc_hba *phba = vport->phba; 5233 struct lpfc_vector_map_info *cpup; 5234 int len = 0; 5235 5236 if ((phba->sli_rev != LPFC_SLI_REV4) || 5237 (phba->intr_type != MSIX)) 5238 return len; 5239 5240 switch (phba->cfg_fcp_cpu_map) { 5241 case 0: 5242 len += scnprintf(buf + len, PAGE_SIZE-len, 5243 "fcp_cpu_map: No mapping (%d)\n", 5244 phba->cfg_fcp_cpu_map); 5245 return len; 5246 case 1: 5247 len += scnprintf(buf + len, PAGE_SIZE-len, 5248 "fcp_cpu_map: HBA centric mapping (%d): " 5249 "%d of %d CPUs online from %d possible CPUs\n", 5250 phba->cfg_fcp_cpu_map, num_online_cpus(), 5251 num_present_cpus(), 5252 phba->sli4_hba.num_possible_cpu); 5253 break; 5254 } 5255 5256 while (phba->sli4_hba.curr_disp_cpu < 5257 phba->sli4_hba.num_possible_cpu) { 5258 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; 5259 5260 if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) 5261 len += scnprintf(buf + len, PAGE_SIZE - len, 5262 "CPU %02d not present\n", 5263 phba->sli4_hba.curr_disp_cpu); 5264 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 5265 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5266 len += scnprintf( 5267 buf + len, PAGE_SIZE - len, 5268 "CPU %02d hdwq None " 5269 "physid %d coreid %d ht %d ua %d\n", 5270 phba->sli4_hba.curr_disp_cpu, 5271 cpup->phys_id, cpup->core_id, 5272 (cpup->flag & LPFC_CPU_MAP_HYPER), 5273 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5274 else 5275 len += scnprintf( 5276 buf + len, PAGE_SIZE - len, 5277 "CPU %02d EQ None hdwq %04d " 5278 "physid %d coreid %d ht %d ua %d\n", 5279 phba->sli4_hba.curr_disp_cpu, 5280 cpup->hdwq, cpup->phys_id, 5281 cpup->core_id, 5282 (cpup->flag & LPFC_CPU_MAP_HYPER), 5283 (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); 5284 } else { 5285 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) 5286 len += scnprintf( 5287 buf + len, PAGE_SIZE - len, 5288 "CPU %02d hdwq None " 5289 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5290 phba->sli4_hba.curr_disp_cpu, 5291 cpup->phys_id, 5292 cpup->core_id, 5293 (cpup->flag & LPFC_CPU_MAP_HYPER), 5294 (cpup->flag & LPFC_CPU_MAP_UNASSIGN), 5295 lpfc_get_irq(cpup->eq)); 5296 else 5297 len += scnprintf( 5298 buf + len, PAGE_SIZE - len, 5299 "CPU %02d EQ %04d hdwq %04d " 5300 "physid %d coreid %d ht %d ua %d IRQ %d\n", 5301 phba->sli4_hba.curr_disp_cpu, 5302 cpup->eq, cpup->hdwq, cpup->phys_id, 5303 
cpup->core_id, 5304 (cpup->flag & LPFC_CPU_MAP_HYPER), 5305 (cpup->flag & LPFC_CPU_MAP_UNASSIGN), 5306 lpfc_get_irq(cpup->eq)); 5307 } 5308 5309 phba->sli4_hba.curr_disp_cpu++; 5310 5311 /* display max number of CPUs keeping some margin */ 5312 if (phba->sli4_hba.curr_disp_cpu < 5313 phba->sli4_hba.num_possible_cpu && 5314 (len >= (PAGE_SIZE - 64))) { 5315 len += scnprintf(buf + len, 5316 PAGE_SIZE - len, "more...\n"); 5317 break; 5318 } 5319 } 5320 5321 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu) 5322 phba->sli4_hba.curr_disp_cpu = 0; 5323 5324 return len; 5325 } 5326 5327 /** 5328 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors 5329 * @dev: class device that is converted into a Scsi_host. 5330 * @attr: device attribute, not used. 5331 * @buf: one or more lpfc_polling_flags values. 5332 * @count: not used. 5333 * 5334 * Returns: 5335 * -EINVAL - Not implemented yet. 5336 **/ 5337 static ssize_t 5338 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, 5339 const char *buf, size_t count) 5340 { 5341 int status = -EINVAL; 5342 return status; 5343 } 5344 5345 /* 5346 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors 5347 # for the HBA. 5348 # 5349 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1). 5350 # 0 - Do not affinitze IRQ vectors 5351 # 1 - Affintize HBA vectors with respect to each HBA 5352 # (start with CPU0 for each HBA) 5353 # This also defines how Hardware Queues are mapped to specific CPUs. 5354 */ 5355 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP; 5356 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); 5357 MODULE_PARM_DESC(lpfc_fcp_cpu_map, 5358 "Defines how to map CPUs to IRQ vectors per HBA"); 5359 5360 /** 5361 * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable 5362 * @phba: lpfc_hba pointer. 5363 * @val: link speed value. 5364 * 5365 * Description: 5366 * If val is in a valid range [0-2], then affinitze the adapter's 5367 * MSIX vectors. 5368 * 5369 * Returns: 5370 * zero if val saved. 5371 * -EINVAL val out of range 5372 **/ 5373 static int 5374 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) 5375 { 5376 if (phba->sli_rev != LPFC_SLI_REV4) { 5377 phba->cfg_fcp_cpu_map = 0; 5378 return 0; 5379 } 5380 5381 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { 5382 phba->cfg_fcp_cpu_map = val; 5383 return 0; 5384 } 5385 5386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5387 "3326 lpfc_fcp_cpu_map: %d out of range, using " 5388 "default\n", val); 5389 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP; 5390 5391 return 0; 5392 } 5393 5394 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map); 5395 5396 /* 5397 # lpfc_fcp_class: Determines FC class to use for the FCP protocol. 5398 # Value range is [2,3]. Default value is 3. 5399 */ 5400 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3, 5401 "Select Fibre Channel class of service for FCP sequences"); 5402 5403 /* 5404 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range 5405 # is [0,1]. Default value is 0. 5406 */ 5407 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, 5408 "Use ADISC on rediscovery to authenticate FCP devices"); 5409 5410 /* 5411 # lpfc_first_burst_size: First burst size to use on the NPorts 5412 # that support first burst. 5413 # Value range is [0,65536]. Default value is 0. 5414 */ 5415 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, 5416 "First burst size for Targets that support first burst"); 5417 5418 /* 5419 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size. 
5420 * When the driver is configured as an NVME target, this value is 5421 * communicated to the NVME initiator in the PRLI response. It is 5422 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support 5423 * parameters are set and the target is sending the PRLI RSP. 5424 * Parameter supported on physical port only - no NPIV support. 5425 * Value range is [0,65536]. Default value is 0. 5426 */ 5427 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, 5428 "NVME Target mode first burst size in 512B increments."); 5429 5430 /* 5431 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. 5432 * For the Initiator (I), enabling this parameter means that an NVMET 5433 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be 5434 * processed by the initiator for subsequent NVME FCP IO. 5435 * Currently, this feature is not supported on the NVME target 5436 * Value range is [0,1]. Default value is 0 (disabled). 5437 */ 5438 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, 5439 "Enable First Burst feature for NVME Initiator."); 5440 5441 /* 5442 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue 5443 # depth. Default value is 0. When the value of this parameter is zero the 5444 # SCSI command completion time is not used for controlling I/O queue depth. When 5445 # the parameter is set to a non-zero value, the I/O queue depth is controlled 5446 # to limit the I/O completion time to the parameter value. 5447 # The value is set in milliseconds. 5448 */ 5449 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000, 5450 "Use command completion time to control queue depth"); 5451 5452 lpfc_vport_param_show(max_scsicmpl_time); 5453 static int 5454 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) 5455 { 5456 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5457 struct lpfc_nodelist *ndlp, *next_ndlp; 5458 5459 if (val == vport->cfg_max_scsicmpl_time) 5460 return 0; 5461 if ((val < 0) || (val > 60000)) 5462 return -EINVAL; 5463 vport->cfg_max_scsicmpl_time = val; 5464 5465 spin_lock_irq(shost->host_lock); 5466 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5467 if (!NLP_CHK_NODE_ACT(ndlp)) 5468 continue; 5469 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 5470 continue; 5471 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 5472 } 5473 spin_unlock_irq(shost->host_lock); 5474 return 0; 5475 } 5476 lpfc_vport_param_store(max_scsicmpl_time); 5477 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); 5478 5479 /* 5480 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value 5481 # range is [0,1]. Default value is 0. 5482 */ 5483 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); 5484 5485 /* 5486 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature 5487 # range is [0,1]. Default value is 1. 5488 */ 5489 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing"); 5490 5491 /* 5492 * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds 5493 * range is [0,1]. Default value is 0. 5494 * For [0], FCP commands are issued to Work Queues based on upper layer 5495 * hardware queue index. 5496 * For [1], FCP commands are issued to a Work Queue associated with the 5497 * current CPU. 5498 * 5499 * LPFC_FCP_SCHED_BY_HDWQ == 0 5500 * LPFC_FCP_SCHED_BY_CPU == 1 5501 * 5502 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu 5503 * affinity for FCP/NVME I/Os through Work Queues associated with the current 5504 * CPU. 
Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
 * through WQs will be used.
 */
LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
	     LPFC_FCP_SCHED_BY_HDWQ,
	     LPFC_FCP_SCHED_BY_CPU,
	     "Determine scheduling algorithm for "
	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");

/*
 * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
 * range is [0,1]. Default value is 0.
 * For [0], GID_FT is used for NameServer queries after RSCN (default)
 * For [1], GID_PT is used for NameServer queries after RSCN
 *
 */
LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
	     "Determine algorithm NameServer queries after RSCN "
	     "[0] - GID_FT, [1] - GID_PT");

/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
# range is [0,1]. Default value is 0.
# For [0], bus reset issues target reset to ALL devices
# For [1], bus reset issues target reset to non-FCP2 devices
*/
LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");

/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing:
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
	     "interrupt response is generated");

LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
	     "interrupt response is generated");

/*
# lpfc_multi_ring_support: Determines how many rings to spread available
# cmd/rsp IOCB entries across.
# Value range is [1,2]. Default value is 1.
*/
LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
	    "SLI rings to spread IOCB entries across");

/*
# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
# identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
*/
LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
	    255, "Identifies RCTL for additional ring configuration");

/*
# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
# identifies what type value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
*/
LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
	    255, "Identifies TYPE for additional ring configuration");

/*
# lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
# 0 = SmartSAN functionality disabled (default)
# 1 = SmartSAN functionality enabled
# This parameter will override the value of lpfc_fdmi_on module parameter.
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");

/*
# lpfc_fdmi_on: Controls FDMI support.
# 0 No FDMI support
# 1 Traditional FDMI support (default)
# Traditional FDMI support means the driver will assume FDMI-2 support;
# however, if that fails, it will fall back to FDMI-1.
# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
# lpfc_fdmi_on.
# Value range [0,1]. Default value is 1.
*/
LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");

/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
# discovery). Value range is [1,64]. Default value is 32.
*/
LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
		"during discovery");

/*
# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
# will be scanned by the SCSI midlayer when sequential scanning is
# used; and is also the highest LUN ID allowed when the SCSI midlayer
# parses REPORT_LUN responses. The lpfc driver has no LUN count or
# LUN ID limit, but the SCSI midlayer requires this field for the uses
# above. The lpfc driver limits the default value to 255 for two reasons.
# As it bounds the sequential scan loop, scanning for thousands of LUNs
# on a target can take minutes of wall clock time. Additionally,
# there are FC targets, such as JBODs, that only recognize 8 bits of
# LUN ID. When they receive a value greater than 8 bits, they chop off
# the high-order bits. In other words, they see LUN IDs 0, 256, 512,
# and so on all as LUN ID 0. This causes the Linux kernel, which sees
# valid responses at each of the LUN IDs, to believe there are multiple
# devices present, when in fact there is only 1.
# A customer that is aware of their target behaviors, and the results as
# indicated above, is welcome to increase the lpfc_max_luns value.
# As mentioned, this value is not used by the lpfc driver, only the
# SCSI midlayer.
# Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
*/
LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");

/*
# lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
# Value range is [1,255], default value is 10.
*/
LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
	     "Milliseconds driver will wait between polling FCP ring");

/*
# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
# to complete in seconds. Value range is [5,180], default value is 60.
*/
LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
	     "Maximum time to wait for task management commands to complete");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled
# 1 = MSI enabled
# 2 = MSI-X enabled (default)
# Value range is [0,2]. Default value is 2.
*/
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
	    "MSI-X (2), if possible");

/*
 * lpfc_nvme_oas: Use the OAS bit when sending NVME/NVMET IOs
 *
 * 0 = NVME OAS disabled
 * 1 = NVME OAS enabled
 *
 * Value range is [0,1]. Default value is 0.
 */
LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
	     "Use OAS bit on NVME IOs");

/*
 * lpfc_nvme_embed_cmd: Determine whether to embed the NVME Command in the
 * WQE when sending NVME/NVMET IOs
 *
 * 0 = Put NVME Command in SGL
 * 1 = Embed NVME Command in WQE (unless G7)
 * 2 = Embed NVME Command in WQE (force)
 *
 * Value range is [0,2]. Default value is 1.
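 *
 * Illustrative usage only (the host number is an example; the sysfs path
 * follows the /sys/class/scsi_host/hostxxx/ convention noted for the other
 * shost attributes in this file):
 *   cat /sys/class/scsi_host/host0/lpfc_nvme_embed_cmd
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_nvme_embed_cmd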
5668 */ 5669 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, 5670 "Embed NVME Command in WQE"); 5671 5672 /* 5673 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues 5674 * the driver will advertise it supports to the SCSI layer. 5675 * 5676 * 0 = Set nr_hw_queues by the number of CPUs or HW queues. 5677 * 1,256 = Manually specify nr_hw_queue value to be advertised, 5678 * 5679 * Value range is [0,256]. Default value is 8. 5680 */ 5681 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, 5682 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, 5683 "Set the number of SCSI Queues advertised"); 5684 5685 /* 5686 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5687 * will advertise it supports to the NVME and SCSI layers. This also 5688 * will map to the number of CQ/WQ pairs the driver will create. 5689 * 5690 * The NVME Layer will try to create this many, plus 1 administrative 5691 * hardware queue. The administrative queue will always map to WQ 0 5692 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. 5693 * 5694 * 0 = Configure the number of hdw queues to the number of active CPUs. 5695 * 1,256 = Manually specify how many hdw queues to use. 5696 * 5697 * Value range is [0,256]. Default value is 0. 5698 */ 5699 LPFC_ATTR_R(hdw_queue, 5700 LPFC_HBA_HDWQ_DEF, 5701 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, 5702 "Set the number of I/O Hardware Queues"); 5703 5704 #if IS_ENABLED(CONFIG_X86) 5705 /** 5706 * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on 5707 * irq_chann_mode 5708 * @phba: Pointer to HBA context object. 5709 **/ 5710 static void 5711 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) 5712 { 5713 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; 5714 const struct cpumask *sibling_mask; 5715 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; 5716 5717 cpumask_clear(aff_mask); 5718 5719 if (phba->irq_chann_mode == NUMA_MODE) { 5720 /* Check if we're a NUMA architecture */ 5721 numa_node = dev_to_node(&phba->pcidev->dev); 5722 if (numa_node == NUMA_NO_NODE) { 5723 phba->irq_chann_mode = NORMAL_MODE; 5724 return; 5725 } 5726 } 5727 5728 for_each_possible_cpu(cpu) { 5729 switch (phba->irq_chann_mode) { 5730 case NUMA_MODE: 5731 if (cpu_to_node(cpu) == numa_node) 5732 cpumask_set_cpu(cpu, aff_mask); 5733 break; 5734 case NHT_MODE: 5735 sibling_mask = topology_sibling_cpumask(cpu); 5736 first_cpu = cpumask_first(sibling_mask); 5737 if (first_cpu < nr_cpu_ids) 5738 cpumask_set_cpu(first_cpu, aff_mask); 5739 break; 5740 default: 5741 break; 5742 } 5743 } 5744 } 5745 #endif 5746 5747 static void 5748 lpfc_assign_default_irq_chann(struct lpfc_hba *phba) 5749 { 5750 #if IS_ENABLED(CONFIG_X86) 5751 switch (boot_cpu_data.x86_vendor) { 5752 case X86_VENDOR_AMD: 5753 /* If AMD architecture, then default is NUMA_MODE */ 5754 phba->irq_chann_mode = NUMA_MODE; 5755 break; 5756 case X86_VENDOR_INTEL: 5757 /* If Intel architecture, then default is no hyperthread mode */ 5758 phba->irq_chann_mode = NHT_MODE; 5759 break; 5760 default: 5761 phba->irq_chann_mode = NORMAL_MODE; 5762 break; 5763 } 5764 lpfc_cpumask_irq_mode_init(phba); 5765 #else 5766 phba->irq_chann_mode = NORMAL_MODE; 5767 #endif 5768 } 5769 5770 /* 5771 * lpfc_irq_chann: Set the number of IRQ vectors that are available 5772 * for Hardware Queues to utilize. This also will map to the number 5773 * of EQ / MSI-X vectors the driver will create. 
This should never be 5774 * more than the number of Hardware Queues 5775 * 5776 * 0 = Configure number of IRQ Channels to: 5777 * if AMD architecture, number of CPUs on HBA's NUMA node 5778 * if Intel architecture, number of physical CPUs. 5779 * otherwise, number of active CPUs. 5780 * [1,256] = Manually specify how many IRQ Channels to use. 5781 * 5782 * Value range is [0,256]. Default value is [0]. 5783 */ 5784 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; 5785 module_param(lpfc_irq_chann, uint, 0444); 5786 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); 5787 5788 /* lpfc_irq_chann_init - Set the hba irq_chann initial value 5789 * @phba: lpfc_hba pointer. 5790 * @val: contains the initial value 5791 * 5792 * Description: 5793 * Validates the initial value is within range and assigns it to the 5794 * adapter. If not in range, an error message is posted and the 5795 * default value is assigned. 5796 * 5797 * Returns: 5798 * zero if value is in range and is set 5799 * -EINVAL if value was out of range 5800 **/ 5801 static int 5802 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) 5803 { 5804 const struct cpumask *aff_mask; 5805 5806 if (phba->cfg_use_msi != 2) { 5807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5808 "8532 use_msi = %u ignoring cfg_irq_numa\n", 5809 phba->cfg_use_msi); 5810 phba->irq_chann_mode = NORMAL_MODE; 5811 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5812 return 0; 5813 } 5814 5815 /* Check if default setting was passed */ 5816 if (val == LPFC_IRQ_CHANN_DEF) 5817 lpfc_assign_default_irq_chann(phba); 5818 5819 if (phba->irq_chann_mode != NORMAL_MODE) { 5820 aff_mask = &phba->sli4_hba.irq_aff_mask; 5821 5822 if (cpumask_empty(aff_mask)) { 5823 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5824 "8533 Could not identify CPUS for " 5825 "mode %d, ignoring\n", 5826 phba->irq_chann_mode); 5827 phba->irq_chann_mode = NORMAL_MODE; 5828 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5829 } else { 5830 phba->cfg_irq_chann = cpumask_weight(aff_mask); 5831 5832 /* If no hyperthread mode, then set hdwq count to 5833 * aff_mask weight as well 5834 */ 5835 if (phba->irq_chann_mode == NHT_MODE) 5836 phba->cfg_hdw_queue = phba->cfg_irq_chann; 5837 5838 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5839 "8543 lpfc_irq_chann set to %u " 5840 "(mode: %d)\n", phba->cfg_irq_chann, 5841 phba->irq_chann_mode); 5842 } 5843 } else { 5844 if (val > LPFC_IRQ_CHANN_MAX) { 5845 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5846 "8545 lpfc_irq_chann attribute cannot " 5847 "be set to %u, allowed range is " 5848 "[%u,%u]\n", 5849 val, 5850 LPFC_IRQ_CHANN_MIN, 5851 LPFC_IRQ_CHANN_MAX); 5852 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; 5853 return -EINVAL; 5854 } 5855 phba->cfg_irq_chann = val; 5856 } 5857 5858 return 0; 5859 } 5860 5861 /** 5862 * lpfc_irq_chann_show - Display value of irq_chann 5863 * @dev: class converted to a Scsi_host structure. 5864 * @attr: device attribute, not used. 5865 * @buf: on return contains a string with the list sizes 5866 * 5867 * Returns: size of formatted string. 
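 *
 * Example (read-only attribute; the host number shown is illustrative):
 *   cat /sys/class/scsi_host/host0/lpfc_irq_chann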
5868 **/ 5869 static ssize_t 5870 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, 5871 char *buf) 5872 { 5873 struct Scsi_Host *shost = class_to_shost(dev); 5874 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 5875 struct lpfc_hba *phba = vport->phba; 5876 5877 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); 5878 } 5879 5880 static DEVICE_ATTR_RO(lpfc_irq_chann); 5881 5882 /* 5883 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 5884 # 0 = HBA resets disabled 5885 # 1 = HBA resets enabled (default) 5886 # 2 = HBA reset via PCI bus reset enabled 5887 # Value range is [0,2]. Default value is 1. 5888 */ 5889 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver."); 5890 5891 /* 5892 # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. 5893 # 0 = HBA Heartbeat disabled 5894 # 1 = HBA Heartbeat enabled (default) 5895 # Value range is [0,1]. Default value is 1. 5896 */ 5897 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); 5898 5899 /* 5900 # lpfc_EnableXLane: Enable Express Lane Feature 5901 # 0x0 Express Lane Feature disabled 5902 # 0x1 Express Lane Feature enabled 5903 # Value range is [0,1]. Default value is 0. 5904 */ 5905 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); 5906 5907 /* 5908 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature 5909 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) 5910 # Value range is [0x0,0x7f]. Default value is 0 5911 */ 5912 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); 5913 5914 /* 5915 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 5916 # 0 = BlockGuard disabled (default) 5917 # 1 = BlockGuard enabled 5918 # Value range is [0,1]. Default value is 0. 5919 */ 5920 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 5921 5922 /* 5923 # lpfc_prot_mask: i 5924 # - Bit mask of host protection capabilities used to register with the 5925 # SCSI mid-layer 5926 # - Only meaningful if BG is turned on (lpfc_enable_bg=1). 5927 # - Allows you to ultimately specify which profiles to use 5928 # - Default will result in registering capabilities for all profiles. 5929 # - SHOST_DIF_TYPE1_PROTECTION 1 5930 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection 5931 # - SHOST_DIX_TYPE0_PROTECTION 8 5932 # HBA supports DIX Type 0: Host to HBA protection only 5933 # - SHOST_DIX_TYPE1_PROTECTION 16 5934 # HBA supports DIX Type 1: Host to HBA Type 1 protection 5935 # 5936 */ 5937 LPFC_ATTR(prot_mask, 5938 (SHOST_DIF_TYPE1_PROTECTION | 5939 SHOST_DIX_TYPE0_PROTECTION | 5940 SHOST_DIX_TYPE1_PROTECTION), 5941 0, 5942 (SHOST_DIF_TYPE1_PROTECTION | 5943 SHOST_DIX_TYPE0_PROTECTION | 5944 SHOST_DIX_TYPE1_PROTECTION), 5945 "T10-DIF host protection capabilities mask"); 5946 5947 /* 5948 # lpfc_prot_guard: i 5949 # - Bit mask of protection guard types to register with the SCSI mid-layer 5950 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum 5951 # - Allows you to ultimately specify which profiles to use 5952 # - Default will result in registering capabilities for all guard types 5953 # 5954 */ 5955 LPFC_ATTR(prot_guard, 5956 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, 5957 "T10-DIF host protection guard type"); 5958 5959 /* 5960 * Delay initial NPort discovery when Clean Address bit is cleared in 5961 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. 5962 * This parameter can have value 0 or 1. 
5963 * When this parameter is set to 0, no delay is added to the initial 5964 * discovery. 5965 * When this parameter is set to non-zero value, initial Nport discovery is 5966 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC 5967 * accept and FCID/Fabric name/Fabric portname is changed. 5968 * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion 5969 * when Clean Address bit is cleared in FLOGI/FDISC 5970 * accept and FCID/Fabric name/Fabric portname is changed. 5971 * Default value is 0. 5972 */ 5973 LPFC_ATTR(delay_discovery, 0, 0, 1, 5974 "Delay NPort discovery when Clean Address bit is cleared."); 5975 5976 /* 5977 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count 5978 * This value can be set to values between 64 and 4096. The default value 5979 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi 5980 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE). 5981 * Because of the additional overhead involved in setting up T10-DIF, 5982 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 5983 * and will be limited to 512 if BlockGuard is enabled under SLI3. 5984 */ 5985 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 5986 module_param(lpfc_sg_seg_cnt, uint, 0444); 5987 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count"); 5988 5989 /** 5990 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes 5991 * configured for the adapter 5992 * @dev: class converted to a Scsi_host structure. 5993 * @attr: device attribute, not used. 5994 * @buf: on return contains a string with the list sizes 5995 * 5996 * Returns: size of formatted string. 5997 **/ 5998 static ssize_t 5999 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, 6000 char *buf) 6001 { 6002 struct Scsi_Host *shost = class_to_shost(dev); 6003 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 6004 struct lpfc_hba *phba = vport->phba; 6005 int len; 6006 6007 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 6008 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 6009 6010 len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n", 6011 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 6012 phba->cfg_nvme_seg_cnt); 6013 return len; 6014 } 6015 6016 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); 6017 6018 /** 6019 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value 6020 * @phba: lpfc_hba pointer. 6021 * @val: contains the initial value 6022 * 6023 * Description: 6024 * Validates the initial value is within range and assigns it to the 6025 * adapter. If not in range, an error message is posted and the 6026 * default value is assigned. 6027 * 6028 * Returns: 6029 * zero if value is in range and is set 6030 * -EINVAL if value was out of range 6031 **/ 6032 static int 6033 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) 6034 { 6035 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) { 6036 phba->cfg_sg_seg_cnt = val; 6037 return 0; 6038 } 6039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6040 "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot " 6041 "be set to %d, allowed range is [%d, %d]\n", 6042 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT); 6043 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; 6044 return -EINVAL; 6045 } 6046 6047 /* 6048 * lpfc_enable_mds_diags: Enable MDS Diagnostics 6049 * 0 = MDS Diagnostics disabled (default) 6050 * 1 = MDS Diagnostics enabled 6051 * Value range is [0,1]. Default value is 0. 
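 *
 * Illustrative usage (host number is an example only):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_enable_mds_diags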
6052 */ 6053 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); 6054 6055 /* 6056 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size 6057 * 0 = Disable firmware logging (default) 6058 * [1-4] = Multiple of 1/4th Mb of host memory for FW logging 6059 * Value range [0..4]. Default value is 0 6060 */ 6061 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); 6062 lpfc_param_show(ras_fwlog_buffsize); 6063 6064 static ssize_t 6065 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) 6066 { 6067 int ret = 0; 6068 enum ras_state state; 6069 6070 if (!lpfc_rangecheck(val, 0, 4)) 6071 return -EINVAL; 6072 6073 if (phba->cfg_ras_fwlog_buffsize == val) 6074 return 0; 6075 6076 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) 6077 return -EINVAL; 6078 6079 spin_lock_irq(&phba->hbalock); 6080 state = phba->ras_fwlog.state; 6081 spin_unlock_irq(&phba->hbalock); 6082 6083 if (state == REG_INPROGRESS) { 6084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " 6085 "registration is in progress\n"); 6086 return -EBUSY; 6087 } 6088 6089 /* For disable logging: stop the logs and free the DMA. 6090 * For ras_fwlog_buffsize size change we still need to free and 6091 * reallocate the DMA in lpfc_sli4_ras_fwlog_init. 6092 */ 6093 phba->cfg_ras_fwlog_buffsize = val; 6094 if (state == ACTIVE) { 6095 lpfc_ras_stop_fwlog(phba); 6096 lpfc_sli4_ras_dma_free(phba); 6097 } 6098 6099 lpfc_sli4_ras_init(phba); 6100 if (phba->ras_fwlog.ras_enabled) 6101 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6102 LPFC_RAS_ENABLE_LOGGING); 6103 return ret; 6104 } 6105 6106 lpfc_param_store(ras_fwlog_buffsize); 6107 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); 6108 6109 /* 6110 * lpfc_ras_fwlog_level: Firmware logging verbosity level 6111 * Valid only if firmware logging is enabled 6112 * 0(Least Verbosity) 4 (most verbosity) 6113 * Value range is [0..4]. Default value is 0 6114 */ 6115 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level"); 6116 6117 /* 6118 * lpfc_ras_fwlog_func: Firmware logging enabled on function number 6119 * Default function which has RAS support : 0 6120 * Value Range is [0..7]. 6121 * FW logging is a global action and enablement is via a specific 6122 * port. 6123 */ 6124 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); 6125 6126 /* 6127 * lpfc_enable_bbcr: Enable BB Credit Recovery 6128 * 0 = BB Credit Recovery disabled 6129 * 1 = BB Credit Recovery enabled (default) 6130 * Value range is [0,1]. Default value is 1. 6131 */ 6132 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); 6133 6134 /* 6135 * lpfc_enable_dpp: Enable DPP on G7 6136 * 0 = DPP on G7 disabled 6137 * 1 = DPP on G7 enabled (default) 6138 * Value range is [0,1]. Default value is 1. 
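 *
 * Illustrative usage (assumes the LPFC_ATTR_RW macro also registers an
 * lpfc_enable_dpp module parameter, mirroring the explicit module_param()
 * calls used elsewhere in this file):
 *   modprobe lpfc lpfc_enable_dpp=0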
6139 */ 6140 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); 6141 6142 struct device_attribute *lpfc_hba_attrs[] = { 6143 &dev_attr_nvme_info, 6144 &dev_attr_scsi_stat, 6145 &dev_attr_bg_info, 6146 &dev_attr_bg_guard_err, 6147 &dev_attr_bg_apptag_err, 6148 &dev_attr_bg_reftag_err, 6149 &dev_attr_info, 6150 &dev_attr_serialnum, 6151 &dev_attr_modeldesc, 6152 &dev_attr_modelname, 6153 &dev_attr_programtype, 6154 &dev_attr_portnum, 6155 &dev_attr_fwrev, 6156 &dev_attr_hdw, 6157 &dev_attr_option_rom_version, 6158 &dev_attr_link_state, 6159 &dev_attr_num_discovered_ports, 6160 &dev_attr_menlo_mgmt_mode, 6161 &dev_attr_lpfc_drvr_version, 6162 &dev_attr_lpfc_enable_fip, 6163 &dev_attr_lpfc_temp_sensor, 6164 &dev_attr_lpfc_log_verbose, 6165 &dev_attr_lpfc_lun_queue_depth, 6166 &dev_attr_lpfc_tgt_queue_depth, 6167 &dev_attr_lpfc_hba_queue_depth, 6168 &dev_attr_lpfc_peer_port_login, 6169 &dev_attr_lpfc_nodev_tmo, 6170 &dev_attr_lpfc_devloss_tmo, 6171 &dev_attr_lpfc_enable_fc4_type, 6172 &dev_attr_lpfc_fcp_class, 6173 &dev_attr_lpfc_use_adisc, 6174 &dev_attr_lpfc_first_burst_size, 6175 &dev_attr_lpfc_ack0, 6176 &dev_attr_lpfc_xri_rebalancing, 6177 &dev_attr_lpfc_topology, 6178 &dev_attr_lpfc_scan_down, 6179 &dev_attr_lpfc_link_speed, 6180 &dev_attr_lpfc_fcp_io_sched, 6181 &dev_attr_lpfc_ns_query, 6182 &dev_attr_lpfc_fcp2_no_tgt_reset, 6183 &dev_attr_lpfc_cr_delay, 6184 &dev_attr_lpfc_cr_count, 6185 &dev_attr_lpfc_multi_ring_support, 6186 &dev_attr_lpfc_multi_ring_rctl, 6187 &dev_attr_lpfc_multi_ring_type, 6188 &dev_attr_lpfc_fdmi_on, 6189 &dev_attr_lpfc_enable_SmartSAN, 6190 &dev_attr_lpfc_max_luns, 6191 &dev_attr_lpfc_enable_npiv, 6192 &dev_attr_lpfc_fcf_failover_policy, 6193 &dev_attr_lpfc_enable_rrq, 6194 &dev_attr_nport_evt_cnt, 6195 &dev_attr_board_mode, 6196 &dev_attr_max_vpi, 6197 &dev_attr_used_vpi, 6198 &dev_attr_max_rpi, 6199 &dev_attr_used_rpi, 6200 &dev_attr_max_xri, 6201 &dev_attr_used_xri, 6202 &dev_attr_npiv_info, 6203 &dev_attr_issue_reset, 6204 &dev_attr_lpfc_poll, 6205 &dev_attr_lpfc_poll_tmo, 6206 &dev_attr_lpfc_task_mgmt_tmo, 6207 &dev_attr_lpfc_use_msi, 6208 &dev_attr_lpfc_nvme_oas, 6209 &dev_attr_lpfc_nvme_embed_cmd, 6210 &dev_attr_lpfc_fcp_imax, 6211 &dev_attr_lpfc_force_rscn, 6212 &dev_attr_lpfc_cq_poll_threshold, 6213 &dev_attr_lpfc_cq_max_proc_limit, 6214 &dev_attr_lpfc_fcp_cpu_map, 6215 &dev_attr_lpfc_fcp_mq_threshold, 6216 &dev_attr_lpfc_hdw_queue, 6217 &dev_attr_lpfc_irq_chann, 6218 &dev_attr_lpfc_suppress_rsp, 6219 &dev_attr_lpfc_nvmet_mrq, 6220 &dev_attr_lpfc_nvmet_mrq_post, 6221 &dev_attr_lpfc_nvme_enable_fb, 6222 &dev_attr_lpfc_nvmet_fb_size, 6223 &dev_attr_lpfc_enable_bg, 6224 &dev_attr_lpfc_soft_wwnn, 6225 &dev_attr_lpfc_soft_wwpn, 6226 &dev_attr_lpfc_soft_wwn_enable, 6227 &dev_attr_lpfc_enable_hba_reset, 6228 &dev_attr_lpfc_enable_hba_heartbeat, 6229 &dev_attr_lpfc_EnableXLane, 6230 &dev_attr_lpfc_XLanePriority, 6231 &dev_attr_lpfc_xlane_lun, 6232 &dev_attr_lpfc_xlane_tgt, 6233 &dev_attr_lpfc_xlane_vpt, 6234 &dev_attr_lpfc_xlane_lun_state, 6235 &dev_attr_lpfc_xlane_lun_status, 6236 &dev_attr_lpfc_xlane_priority, 6237 &dev_attr_lpfc_sg_seg_cnt, 6238 &dev_attr_lpfc_max_scsicmpl_time, 6239 &dev_attr_lpfc_stat_data_ctrl, 6240 &dev_attr_lpfc_aer_support, 6241 &dev_attr_lpfc_aer_state_cleanup, 6242 &dev_attr_lpfc_sriov_nr_virtfn, 6243 &dev_attr_lpfc_req_fw_upgrade, 6244 &dev_attr_lpfc_suppress_link_up, 6245 &dev_attr_iocb_hw, 6246 &dev_attr_pls, 6247 &dev_attr_pt, 6248 &dev_attr_txq_hw, 6249 &dev_attr_txcmplq_hw, 6250 &dev_attr_lpfc_sriov_hw_max_virtfn, 
6251 &dev_attr_protocol, 6252 &dev_attr_lpfc_xlane_supported, 6253 &dev_attr_lpfc_enable_mds_diags, 6254 &dev_attr_lpfc_ras_fwlog_buffsize, 6255 &dev_attr_lpfc_ras_fwlog_level, 6256 &dev_attr_lpfc_ras_fwlog_func, 6257 &dev_attr_lpfc_enable_bbcr, 6258 &dev_attr_lpfc_enable_dpp, 6259 NULL, 6260 }; 6261 6262 struct device_attribute *lpfc_vport_attrs[] = { 6263 &dev_attr_info, 6264 &dev_attr_link_state, 6265 &dev_attr_num_discovered_ports, 6266 &dev_attr_lpfc_drvr_version, 6267 &dev_attr_lpfc_log_verbose, 6268 &dev_attr_lpfc_lun_queue_depth, 6269 &dev_attr_lpfc_tgt_queue_depth, 6270 &dev_attr_lpfc_nodev_tmo, 6271 &dev_attr_lpfc_devloss_tmo, 6272 &dev_attr_lpfc_hba_queue_depth, 6273 &dev_attr_lpfc_peer_port_login, 6274 &dev_attr_lpfc_restrict_login, 6275 &dev_attr_lpfc_fcp_class, 6276 &dev_attr_lpfc_use_adisc, 6277 &dev_attr_lpfc_first_burst_size, 6278 &dev_attr_lpfc_max_luns, 6279 &dev_attr_nport_evt_cnt, 6280 &dev_attr_npiv_info, 6281 &dev_attr_lpfc_enable_da_id, 6282 &dev_attr_lpfc_max_scsicmpl_time, 6283 &dev_attr_lpfc_stat_data_ctrl, 6284 &dev_attr_lpfc_static_vport, 6285 NULL, 6286 }; 6287 6288 /** 6289 * sysfs_ctlreg_write - Write method for writing to ctlreg 6290 * @filp: open sysfs file 6291 * @kobj: kernel kobject that contains the kernel class device. 6292 * @bin_attr: kernel attributes passed to us. 6293 * @buf: contains the data to be written to the adapter IOREG space. 6294 * @off: offset into buffer to beginning of data. 6295 * @count: bytes to transfer. 6296 * 6297 * Description: 6298 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6299 * Uses the adapter io control registers to send buf contents to the adapter. 6300 * 6301 * Returns: 6302 * -ERANGE off and count combo out of range 6303 * -EINVAL off, count or buff address invalid 6304 * -EPERM adapter is offline 6305 * value of count, buf contents written 6306 **/ 6307 static ssize_t 6308 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, 6309 struct bin_attribute *bin_attr, 6310 char *buf, loff_t off, size_t count) 6311 { 6312 size_t buf_off; 6313 struct device *dev = container_of(kobj, struct device, kobj); 6314 struct Scsi_Host *shost = class_to_shost(dev); 6315 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6316 struct lpfc_hba *phba = vport->phba; 6317 6318 if (phba->sli_rev >= LPFC_SLI_REV4) 6319 return -EPERM; 6320 6321 if ((off + count) > FF_REG_AREA_SIZE) 6322 return -ERANGE; 6323 6324 if (count <= LPFC_REG_WRITE_KEY_SIZE) 6325 return 0; 6326 6327 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6328 return -EINVAL; 6329 6330 /* This is to protect HBA registers from accidental writes. */ 6331 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) 6332 return -EINVAL; 6333 6334 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 6335 return -EPERM; 6336 6337 spin_lock_irq(&phba->hbalock); 6338 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; 6339 buf_off += sizeof(uint32_t)) 6340 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), 6341 phba->ctrl_regs_memmap_p + off + buf_off); 6342 6343 spin_unlock_irq(&phba->hbalock); 6344 6345 return count; 6346 } 6347 6348 /** 6349 * sysfs_ctlreg_read - Read method for reading from ctlreg 6350 * @filp: open sysfs file 6351 * @kobj: kernel kobject that contains the kernel class device. 6352 * @bin_attr: kernel attributes passed to us. 6353 * @buf: if successful contains the data from the adapter IOREG space. 6354 * @off: offset into buffer to beginning of data. 6355 * @count: bytes to transfer. 
6356 * 6357 * Description: 6358 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. 6359 * Uses the adapter io control registers to read data into buf. 6360 * 6361 * Returns: 6362 * -ERANGE off and count combo out of range 6363 * -EINVAL off, count or buff address invalid 6364 * value of count, buf contents read 6365 **/ 6366 static ssize_t 6367 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, 6368 struct bin_attribute *bin_attr, 6369 char *buf, loff_t off, size_t count) 6370 { 6371 size_t buf_off; 6372 uint32_t * tmp_ptr; 6373 struct device *dev = container_of(kobj, struct device, kobj); 6374 struct Scsi_Host *shost = class_to_shost(dev); 6375 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6376 struct lpfc_hba *phba = vport->phba; 6377 6378 if (phba->sli_rev >= LPFC_SLI_REV4) 6379 return -EPERM; 6380 6381 if (off > FF_REG_AREA_SIZE) 6382 return -ERANGE; 6383 6384 if ((off + count) > FF_REG_AREA_SIZE) 6385 count = FF_REG_AREA_SIZE - off; 6386 6387 if (count == 0) return 0; 6388 6389 if (off % 4 || count % 4 || (unsigned long)buf % 4) 6390 return -EINVAL; 6391 6392 spin_lock_irq(&phba->hbalock); 6393 6394 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { 6395 tmp_ptr = (uint32_t *)(buf + buf_off); 6396 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); 6397 } 6398 6399 spin_unlock_irq(&phba->hbalock); 6400 6401 return count; 6402 } 6403 6404 static struct bin_attribute sysfs_ctlreg_attr = { 6405 .attr = { 6406 .name = "ctlreg", 6407 .mode = S_IRUSR | S_IWUSR, 6408 }, 6409 .size = 256, 6410 .read = sysfs_ctlreg_read, 6411 .write = sysfs_ctlreg_write, 6412 }; 6413 6414 /** 6415 * sysfs_mbox_write - Write method for writing information via mbox 6416 * @filp: open sysfs file 6417 * @kobj: kernel kobject that contains the kernel class device. 6418 * @bin_attr: kernel attributes passed to us. 6419 * @buf: contains the data to be written to sysfs mbox. 6420 * @off: offset into buffer to beginning of data. 6421 * @count: bytes to transfer. 6422 * 6423 * Description: 6424 * Deprecated function. All mailbox access from user space is performed via the 6425 * bsg interface. 6426 * 6427 * Returns: 6428 * -EPERM operation not permitted 6429 **/ 6430 static ssize_t 6431 sysfs_mbox_write(struct file *filp, struct kobject *kobj, 6432 struct bin_attribute *bin_attr, 6433 char *buf, loff_t off, size_t count) 6434 { 6435 return -EPERM; 6436 } 6437 6438 /** 6439 * sysfs_mbox_read - Read method for reading information via mbox 6440 * @filp: open sysfs file 6441 * @kobj: kernel kobject that contains the kernel class device. 6442 * @bin_attr: kernel attributes passed to us. 6443 * @buf: contains the data to be read from sysfs mbox. 6444 * @off: offset into buffer to beginning of data. 6445 * @count: bytes to transfer. 6446 * 6447 * Description: 6448 * Deprecated function. All mailbox access from user space is performed via the 6449 * bsg interface. 
6450 * 6451 * Returns: 6452 * -EPERM operation not permitted 6453 **/ 6454 static ssize_t 6455 sysfs_mbox_read(struct file *filp, struct kobject *kobj, 6456 struct bin_attribute *bin_attr, 6457 char *buf, loff_t off, size_t count) 6458 { 6459 return -EPERM; 6460 } 6461 6462 static struct bin_attribute sysfs_mbox_attr = { 6463 .attr = { 6464 .name = "mbox", 6465 .mode = S_IRUSR | S_IWUSR, 6466 }, 6467 .size = MAILBOX_SYSFS_MAX, 6468 .read = sysfs_mbox_read, 6469 .write = sysfs_mbox_write, 6470 }; 6471 6472 /** 6473 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries 6474 * @vport: address of lpfc vport structure. 6475 * 6476 * Return codes: 6477 * zero on success 6478 * error return code from sysfs_create_bin_file() 6479 **/ 6480 int 6481 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) 6482 { 6483 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6484 int error; 6485 6486 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6487 &sysfs_drvr_stat_data_attr); 6488 6489 /* Virtual ports do not need ctrl_reg and mbox */ 6490 if (error || vport->port_type == LPFC_NPIV_PORT) 6491 goto out; 6492 6493 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6494 &sysfs_ctlreg_attr); 6495 if (error) 6496 goto out_remove_stat_attr; 6497 6498 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 6499 &sysfs_mbox_attr); 6500 if (error) 6501 goto out_remove_ctlreg_attr; 6502 6503 return 0; 6504 out_remove_ctlreg_attr: 6505 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6506 out_remove_stat_attr: 6507 sysfs_remove_bin_file(&shost->shost_dev.kobj, 6508 &sysfs_drvr_stat_data_attr); 6509 out: 6510 return error; 6511 } 6512 6513 /** 6514 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries 6515 * @vport: address of lpfc vport structure. 6516 **/ 6517 void 6518 lpfc_free_sysfs_attr(struct lpfc_vport *vport) 6519 { 6520 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6521 sysfs_remove_bin_file(&shost->shost_dev.kobj, 6522 &sysfs_drvr_stat_data_attr); 6523 /* Virtual ports do not need ctrl_reg and mbox */ 6524 if (vport->port_type == LPFC_NPIV_PORT) 6525 return; 6526 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 6527 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 6528 } 6529 6530 /* 6531 * Dynamic FC Host Attributes Support 6532 */ 6533 6534 /** 6535 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host 6536 * @shost: kernel scsi host pointer. 6537 **/ 6538 static void 6539 lpfc_get_host_symbolic_name(struct Scsi_Host *shost) 6540 { 6541 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 6542 6543 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 6544 sizeof fc_host_symbolic_name(shost)); 6545 } 6546 6547 /** 6548 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id 6549 * @shost: kernel scsi host pointer. 6550 **/ 6551 static void 6552 lpfc_get_host_port_id(struct Scsi_Host *shost) 6553 { 6554 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6555 6556 /* note: fc_myDID already in cpu endianness */ 6557 fc_host_port_id(shost) = vport->fc_myDID; 6558 } 6559 6560 /** 6561 * lpfc_get_host_port_type - Set the value of the scsi host port type 6562 * @shost: kernel scsi host pointer. 
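 *
 * Description:
 * NPIV vports report FC_PORTTYPE_NPIV. With the link up, loop topology is
 * reported as NLPORT (public loop) or LPORT (private loop), and non-loop
 * topology as NPORT (fabric) or PTP (point to point). In all other cases
 * the port type is reported as unknown.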
6563 **/ 6564 static void 6565 lpfc_get_host_port_type(struct Scsi_Host *shost) 6566 { 6567 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6568 struct lpfc_hba *phba = vport->phba; 6569 6570 spin_lock_irq(shost->host_lock); 6571 6572 if (vport->port_type == LPFC_NPIV_PORT) { 6573 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 6574 } else if (lpfc_is_link_up(phba)) { 6575 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6576 if (vport->fc_flag & FC_PUBLIC_LOOP) 6577 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 6578 else 6579 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 6580 } else { 6581 if (vport->fc_flag & FC_FABRIC) 6582 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 6583 else 6584 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 6585 } 6586 } else 6587 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 6588 6589 spin_unlock_irq(shost->host_lock); 6590 } 6591 6592 /** 6593 * lpfc_get_host_port_state - Set the value of the scsi host port state 6594 * @shost: kernel scsi host pointer. 6595 **/ 6596 static void 6597 lpfc_get_host_port_state(struct Scsi_Host *shost) 6598 { 6599 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6600 struct lpfc_hba *phba = vport->phba; 6601 6602 spin_lock_irq(shost->host_lock); 6603 6604 if (vport->fc_flag & FC_OFFLINE_MODE) 6605 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 6606 else { 6607 switch (phba->link_state) { 6608 case LPFC_LINK_UNKNOWN: 6609 case LPFC_LINK_DOWN: 6610 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 6611 break; 6612 case LPFC_LINK_UP: 6613 case LPFC_CLEAR_LA: 6614 case LPFC_HBA_READY: 6615 /* Links up, reports port state accordingly */ 6616 if (vport->port_state < LPFC_VPORT_READY) 6617 fc_host_port_state(shost) = 6618 FC_PORTSTATE_BYPASSED; 6619 else 6620 fc_host_port_state(shost) = 6621 FC_PORTSTATE_ONLINE; 6622 break; 6623 case LPFC_HBA_ERROR: 6624 fc_host_port_state(shost) = FC_PORTSTATE_ERROR; 6625 break; 6626 default: 6627 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 6628 break; 6629 } 6630 } 6631 6632 spin_unlock_irq(shost->host_lock); 6633 } 6634 6635 /** 6636 * lpfc_get_host_speed - Set the value of the scsi host speed 6637 * @shost: kernel scsi host pointer. 
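 *
 * Description:
 * With the link up, translates phba->fc_linkspeed into the matching
 * FC_PORTSPEED_* value; FCoE links use the asynchronous-event speed
 * encodings instead. If the link is down or the speed is unrecognized,
 * FC_PORTSPEED_UNKNOWN is reported.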
6638 **/ 6639 static void 6640 lpfc_get_host_speed(struct Scsi_Host *shost) 6641 { 6642 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6643 struct lpfc_hba *phba = vport->phba; 6644 6645 spin_lock_irq(shost->host_lock); 6646 6647 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { 6648 switch(phba->fc_linkspeed) { 6649 case LPFC_LINK_SPEED_1GHZ: 6650 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 6651 break; 6652 case LPFC_LINK_SPEED_2GHZ: 6653 fc_host_speed(shost) = FC_PORTSPEED_2GBIT; 6654 break; 6655 case LPFC_LINK_SPEED_4GHZ: 6656 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 6657 break; 6658 case LPFC_LINK_SPEED_8GHZ: 6659 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 6660 break; 6661 case LPFC_LINK_SPEED_10GHZ: 6662 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6663 break; 6664 case LPFC_LINK_SPEED_16GHZ: 6665 fc_host_speed(shost) = FC_PORTSPEED_16GBIT; 6666 break; 6667 case LPFC_LINK_SPEED_32GHZ: 6668 fc_host_speed(shost) = FC_PORTSPEED_32GBIT; 6669 break; 6670 case LPFC_LINK_SPEED_64GHZ: 6671 fc_host_speed(shost) = FC_PORTSPEED_64GBIT; 6672 break; 6673 case LPFC_LINK_SPEED_128GHZ: 6674 fc_host_speed(shost) = FC_PORTSPEED_128GBIT; 6675 break; 6676 default: 6677 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6678 break; 6679 } 6680 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { 6681 switch (phba->fc_linkspeed) { 6682 case LPFC_ASYNC_LINK_SPEED_10GBPS: 6683 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 6684 break; 6685 case LPFC_ASYNC_LINK_SPEED_25GBPS: 6686 fc_host_speed(shost) = FC_PORTSPEED_25GBIT; 6687 break; 6688 case LPFC_ASYNC_LINK_SPEED_40GBPS: 6689 fc_host_speed(shost) = FC_PORTSPEED_40GBIT; 6690 break; 6691 case LPFC_ASYNC_LINK_SPEED_100GBPS: 6692 fc_host_speed(shost) = FC_PORTSPEED_100GBIT; 6693 break; 6694 default: 6695 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6696 break; 6697 } 6698 } else 6699 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6700 6701 spin_unlock_irq(shost->host_lock); 6702 } 6703 6704 /** 6705 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name 6706 * @shost: kernel scsi host pointer. 6707 **/ 6708 static void 6709 lpfc_get_host_fabric_name (struct Scsi_Host *shost) 6710 { 6711 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6712 struct lpfc_hba *phba = vport->phba; 6713 u64 node_name; 6714 6715 spin_lock_irq(shost->host_lock); 6716 6717 if ((vport->port_state > LPFC_FLOGI) && 6718 ((vport->fc_flag & FC_FABRIC) || 6719 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 6720 (vport->fc_flag & FC_PUBLIC_LOOP)))) 6721 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 6722 else 6723 /* fabric is local port if there is no F/FL_Port */ 6724 node_name = 0; 6725 6726 spin_unlock_irq(shost->host_lock); 6727 6728 fc_host_fabric_name(shost) = node_name; 6729 } 6730 6731 /** 6732 * lpfc_get_stats - Return statistical information about the adapter 6733 * @shost: kernel scsi host pointer. 6734 * 6735 * Notes: 6736 * NULL on error for link down, no mbox pool, sli2 active, 6737 * management not allowed, memory allocation error, or mbox error. 
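 * Counters are reported relative to the offsets saved in
 * psli->lnk_stat_offsets, i.e. relative to the last lpfc_reset_stats() call.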
6738 * 6739 * Returns: 6740 * NULL for error 6741 * address of the adapter host statistics 6742 **/ 6743 static struct fc_host_statistics * 6744 lpfc_get_stats(struct Scsi_Host *shost) 6745 { 6746 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6747 struct lpfc_hba *phba = vport->phba; 6748 struct lpfc_sli *psli = &phba->sli; 6749 struct fc_host_statistics *hs = &phba->link_stats; 6750 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 6751 LPFC_MBOXQ_t *pmboxq; 6752 MAILBOX_t *pmb; 6753 int rc = 0; 6754 6755 /* 6756 * prevent udev from issuing mailbox commands until the port is 6757 * configured. 6758 */ 6759 if (phba->link_state < LPFC_LINK_DOWN || 6760 !phba->mbox_mem_pool || 6761 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) 6762 return NULL; 6763 6764 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6765 return NULL; 6766 6767 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6768 if (!pmboxq) 6769 return NULL; 6770 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6771 6772 pmb = &pmboxq->u.mb; 6773 pmb->mbxCommand = MBX_READ_STATUS; 6774 pmb->mbxOwner = OWN_HOST; 6775 pmboxq->ctx_buf = NULL; 6776 pmboxq->vport = vport; 6777 6778 if (vport->fc_flag & FC_OFFLINE_MODE) 6779 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6780 else 6781 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6782 6783 if (rc != MBX_SUCCESS) { 6784 if (rc != MBX_TIMEOUT) 6785 mempool_free(pmboxq, phba->mbox_mem_pool); 6786 return NULL; 6787 } 6788 6789 memset(hs, 0, sizeof (struct fc_host_statistics)); 6790 6791 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; 6792 /* 6793 * The MBX_READ_STATUS returns tx_k_bytes which has to 6794 * converted to words 6795 */ 6796 hs->tx_words = (uint64_t) 6797 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt 6798 * (uint64_t)256); 6799 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; 6800 hs->rx_words = (uint64_t) 6801 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt 6802 * (uint64_t)256); 6803 6804 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6805 pmb->mbxCommand = MBX_READ_LNK_STAT; 6806 pmb->mbxOwner = OWN_HOST; 6807 pmboxq->ctx_buf = NULL; 6808 pmboxq->vport = vport; 6809 6810 if (vport->fc_flag & FC_OFFLINE_MODE) 6811 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6812 else 6813 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6814 6815 if (rc != MBX_SUCCESS) { 6816 if (rc != MBX_TIMEOUT) 6817 mempool_free(pmboxq, phba->mbox_mem_pool); 6818 return NULL; 6819 } 6820 6821 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 6822 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 6823 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 6824 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 6825 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6826 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6827 hs->error_frames = pmb->un.varRdLnk.crcCnt; 6828 6829 hs->link_failure_count -= lso->link_failure_count; 6830 hs->loss_of_sync_count -= lso->loss_of_sync_count; 6831 hs->loss_of_signal_count -= lso->loss_of_signal_count; 6832 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; 6833 hs->invalid_tx_word_count -= lso->invalid_tx_word_count; 6834 hs->invalid_crc_count -= lso->invalid_crc_count; 6835 hs->error_frames -= lso->error_frames; 6836 6837 if (phba->hba_flag & HBA_FCOE_MODE) { 6838 hs->lip_count = -1; 6839 hs->nos_count = (phba->link_events >> 1); 6840 hs->nos_count -= lso->link_events; 6841 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6842 hs->lip_count = 
(phba->fc_eventTag >> 1); 6843 hs->lip_count -= lso->link_events; 6844 hs->nos_count = -1; 6845 } else { 6846 hs->lip_count = -1; 6847 hs->nos_count = (phba->fc_eventTag >> 1); 6848 hs->nos_count -= lso->link_events; 6849 } 6850 6851 hs->dumped_frames = -1; 6852 6853 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; 6854 6855 mempool_free(pmboxq, phba->mbox_mem_pool); 6856 6857 return hs; 6858 } 6859 6860 /** 6861 * lpfc_reset_stats - Copy the adapter link stats information 6862 * @shost: kernel scsi host pointer. 6863 **/ 6864 static void 6865 lpfc_reset_stats(struct Scsi_Host *shost) 6866 { 6867 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6868 struct lpfc_hba *phba = vport->phba; 6869 struct lpfc_sli *psli = &phba->sli; 6870 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; 6871 LPFC_MBOXQ_t *pmboxq; 6872 MAILBOX_t *pmb; 6873 int rc = 0; 6874 6875 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 6876 return; 6877 6878 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6879 if (!pmboxq) 6880 return; 6881 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6882 6883 pmb = &pmboxq->u.mb; 6884 pmb->mbxCommand = MBX_READ_STATUS; 6885 pmb->mbxOwner = OWN_HOST; 6886 pmb->un.varWords[0] = 0x1; /* reset request */ 6887 pmboxq->ctx_buf = NULL; 6888 pmboxq->vport = vport; 6889 6890 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6891 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) 6892 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6893 else 6894 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6895 6896 if (rc != MBX_SUCCESS) { 6897 if (rc != MBX_TIMEOUT) 6898 mempool_free(pmboxq, phba->mbox_mem_pool); 6899 return; 6900 } 6901 6902 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6903 pmb->mbxCommand = MBX_READ_LNK_STAT; 6904 pmb->mbxOwner = OWN_HOST; 6905 pmboxq->ctx_buf = NULL; 6906 pmboxq->vport = vport; 6907 6908 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6909 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) 6910 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 6911 else 6912 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 6913 6914 if (rc != MBX_SUCCESS) { 6915 if (rc != MBX_TIMEOUT) 6916 mempool_free( pmboxq, phba->mbox_mem_pool); 6917 return; 6918 } 6919 6920 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; 6921 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; 6922 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; 6923 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; 6924 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6925 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6926 lso->error_frames = pmb->un.varRdLnk.crcCnt; 6927 if (phba->hba_flag & HBA_FCOE_MODE) 6928 lso->link_events = (phba->link_events >> 1); 6929 else 6930 lso->link_events = (phba->fc_eventTag >> 1); 6931 6932 psli->stats_start = ktime_get_seconds(); 6933 6934 mempool_free(pmboxq, phba->mbox_mem_pool); 6935 6936 return; 6937 } 6938 6939 /* 6940 * The LPFC driver treats linkdown handling as target loss events so there 6941 * are no sysfs handlers for link_down_tmo. 6942 */ 6943 6944 /** 6945 * lpfc_get_node_by_target - Return the nodelist for a target 6946 * @starget: kernel scsi target pointer. 
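 *
 * Description:
 * Walks the vport's fc_nodes list under the SCSI host lock and returns the
 * first active node in MAPPED state whose nlp_sid matches the target id.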
6947 * 6948 * Returns: 6949 * address of the node list if found 6950 * NULL target not found 6951 **/ 6952 static struct lpfc_nodelist * 6953 lpfc_get_node_by_target(struct scsi_target *starget) 6954 { 6955 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 6956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6957 struct lpfc_nodelist *ndlp; 6958 6959 spin_lock_irq(shost->host_lock); 6960 /* Search for this, mapped, target ID */ 6961 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6962 if (NLP_CHK_NODE_ACT(ndlp) && 6963 ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6964 starget->id == ndlp->nlp_sid) { 6965 spin_unlock_irq(shost->host_lock); 6966 return ndlp; 6967 } 6968 } 6969 spin_unlock_irq(shost->host_lock); 6970 return NULL; 6971 } 6972 6973 /** 6974 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 6975 * @starget: kernel scsi target pointer. 6976 **/ 6977 static void 6978 lpfc_get_starget_port_id(struct scsi_target *starget) 6979 { 6980 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 6981 6982 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; 6983 } 6984 6985 /** 6986 * lpfc_get_starget_node_name - Set the target node name 6987 * @starget: kernel scsi target pointer. 6988 * 6989 * Description: Set the target node name to the ndlp node name wwn or zero. 6990 **/ 6991 static void 6992 lpfc_get_starget_node_name(struct scsi_target *starget) 6993 { 6994 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 6995 6996 fc_starget_node_name(starget) = 6997 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; 6998 } 6999 7000 /** 7001 * lpfc_get_starget_port_name - Set the target port name 7002 * @starget: kernel scsi target pointer. 7003 * 7004 * Description: set the target port name to the ndlp port name wwn or zero. 7005 **/ 7006 static void 7007 lpfc_get_starget_port_name(struct scsi_target *starget) 7008 { 7009 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); 7010 7011 fc_starget_port_name(starget) = 7012 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; 7013 } 7014 7015 /** 7016 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo 7017 * @rport: fc rport address. 7018 * @timeout: new value for dev loss tmo. 7019 * 7020 * Description: 7021 * If timeout is non zero set the dev_loss_tmo to timeout, else set 7022 * dev_loss_tmo to one. 7023 **/ 7024 static void 7025 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 7026 { 7027 struct lpfc_rport_data *rdata = rport->dd_data; 7028 struct lpfc_nodelist *ndlp = rdata->pnode; 7029 #if (IS_ENABLED(CONFIG_NVME_FC)) 7030 struct lpfc_nvme_rport *nrport = NULL; 7031 #endif 7032 7033 if (timeout) 7034 rport->dev_loss_tmo = timeout; 7035 else 7036 rport->dev_loss_tmo = 1; 7037 7038 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 7039 dev_info(&rport->dev, "Cannot find remote node to " 7040 "set rport dev loss tmo, port_id x%x\n", 7041 rport->port_id); 7042 return; 7043 } 7044 7045 #if (IS_ENABLED(CONFIG_NVME_FC)) 7046 nrport = lpfc_ndlp_get_nrport(ndlp); 7047 7048 if (nrport && nrport->remoteport) 7049 nvme_fc_set_remoteport_devloss(nrport->remoteport, 7050 rport->dev_loss_tmo); 7051 #endif 7052 } 7053 7054 /** 7055 * lpfc_rport_show_function - Return rport target information 7056 * 7057 * Description: 7058 * Macro that uses field to generate a function with the name lpfc_show_rport_ 7059 * 7060 * lpfc_show_rport_##field: returns the bytes formatted in buf 7061 * @cdev: class converted to an fc_rport. 
/**
 * lpfc_rport_show_function - Return rport target information
 *
 * Description:
 * Macro that uses @field to generate a sysfs show function named
 * lpfc_show_rport_##field, which returns the bytes formatted in buf.
 * @cdev: class device converted to an fc_rport.
 * @buf: on return contains the target_field or zero.
 *
 * Returns: size of formatted string.
 **/
#define lpfc_rport_show_function(field, format_string, sz, cast) \
static ssize_t \
lpfc_show_rport_##field (struct device *dev, \
                         struct device_attribute *attr, \
                         char *buf) \
{ \
        struct fc_rport *rport = transport_class_to_rport(dev); \
        struct lpfc_rport_data *rdata = rport->hostdata; \
        return scnprintf(buf, sz, format_string, \
                         (rdata->target) ? cast rdata->target->field : 0); \
}

#define lpfc_rport_rd_attr(field, format_string, sz) \
        lpfc_rport_show_function(field, format_string, sz, ) \
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
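/*
 * For illustration only (not compiled): given the macros above,
 * lpfc_rport_rd_attr(foo, "%d\n", 20) with a hypothetical field "foo"
 * would expand to roughly:
 *
 *   static ssize_t
 *   lpfc_show_rport_foo(struct device *dev, struct device_attribute *attr,
 *                       char *buf)
 *   {
 *           struct fc_rport *rport = transport_class_to_rport(dev);
 *           struct lpfc_rport_data *rdata = rport->hostdata;
 *           return scnprintf(buf, 20, "%d\n",
 *                            (rdata->target) ? rdata->target->foo : 0);
 *   }
 *   static FC_RPORT_ATTR(foo, S_IRUGO, lpfc_show_rport_foo, NULL)
 *
 * i.e. one sysfs show routine plus the matching read-only FC_RPORT_ATTR
 * declaration per field.
 */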
/**
 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
 * @fc_vport: The fc_vport whose symbolic name has been changed.
 *
 * Description:
 * This function is called by the transport after the @fc_vport's symbolic name
 * has been changed. This function re-registers the symbolic name with the
 * switch to propagate the change into the fabric if the vport is active.
 **/
static void
lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;

        if (vport->port_state == LPFC_VPORT_READY)
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}

/**
 * lpfc_hba_log_verbose_init - Set hba's log verbose level
 * @phba: Pointer to lpfc_hba struct.
 * @verbose: module parameter value to copy into the hba.
 *
 * This function is called by the lpfc_get_cfgparam() routine to copy the
 * module parameter lpfc_log_verbose into @phba's cfg_log_verbose, which
 * controls log message verbosity before any hba port or vport is created.
 **/
static void
lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
{
        phba->cfg_log_verbose = verbose;
}

struct fc_function_template lpfc_transport_functions = {
        /* fixed attributes the driver supports */
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_supported_speeds = 1,
        .show_host_maxframe_size = 1,

        .get_host_symbolic_name = lpfc_get_host_symbolic_name,
        .show_host_symbolic_name = 1,

        /* dynamic attributes the driver supports */
        .get_host_port_id = lpfc_get_host_port_id,
        .show_host_port_id = 1,

        .get_host_port_type = lpfc_get_host_port_type,
        .show_host_port_type = 1,

        .get_host_port_state = lpfc_get_host_port_state,
        .show_host_port_state = 1,

        /* active_fc4s is shown but doesn't change (thus no get function) */
        .show_host_active_fc4s = 1,

        .get_host_speed = lpfc_get_host_speed,
        .show_host_speed = 1,

        .get_host_fabric_name = lpfc_get_host_fabric_name,
        .show_host_fabric_name = 1,

        /*
         * The LPFC driver treats linkdown handling as target loss events
         * so there are no sysfs handlers for link_down_tmo.
         */

        .get_fc_host_stats = lpfc_get_stats,
        .reset_fc_host_stats = lpfc_reset_stats,

        .dd_fcrport_size = sizeof(struct lpfc_rport_data),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,

        .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,

        .get_starget_port_id = lpfc_get_starget_port_id,
        .show_starget_port_id = 1,

        .get_starget_node_name = lpfc_get_starget_node_name,
        .show_starget_node_name = 1,

        .get_starget_port_name = lpfc_get_starget_port_name,
        .show_starget_port_name = 1,

        .issue_fc_host_lip = lpfc_issue_lip,
        .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
        .terminate_rport_io = lpfc_terminate_rport_io,

        .dd_fcvport_size = sizeof(struct lpfc_vport *),

        .vport_disable = lpfc_vport_disable,

        .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,

        .bsg_request = lpfc_bsg_request,
        .bsg_timeout = lpfc_bsg_timeout,
};

struct fc_function_template lpfc_vport_transport_functions = {
        /* fixed attributes the driver supports */
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_supported_speeds = 1,
        .show_host_maxframe_size = 1,

        .get_host_symbolic_name = lpfc_get_host_symbolic_name,
        .show_host_symbolic_name = 1,

        /* dynamic attributes the driver supports */
        .get_host_port_id = lpfc_get_host_port_id,
        .show_host_port_id = 1,

        .get_host_port_type = lpfc_get_host_port_type,
        .show_host_port_type = 1,

        .get_host_port_state = lpfc_get_host_port_state,
        .show_host_port_state = 1,

        /* active_fc4s is shown but doesn't change (thus no get function) */
        .show_host_active_fc4s = 1,

        .get_host_speed = lpfc_get_host_speed,
        .show_host_speed = 1,

        .get_host_fabric_name = lpfc_get_host_fabric_name,
        .show_host_fabric_name = 1,

        /*
         * The LPFC driver treats linkdown handling as target loss events
         * so there are no sysfs handlers for link_down_tmo.
         */

        .get_fc_host_stats = lpfc_get_stats,
        .reset_fc_host_stats = lpfc_reset_stats,

        .dd_fcrport_size = sizeof(struct lpfc_rport_data),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,

        .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,

        .get_starget_port_id = lpfc_get_starget_port_id,
        .show_starget_port_id = 1,

        .get_starget_node_name = lpfc_get_starget_node_name,
        .show_starget_node_name = 1,

        .get_starget_port_name = lpfc_get_starget_port_name,
        .show_starget_port_name = 1,

        .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
        .terminate_rport_io = lpfc_terminate_rport_io,

        .vport_disable = lpfc_vport_disable,

        .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
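/*
 * For illustration only (not compiled): these templates are handed to the FC
 * transport class when the module initializes.  A minimal sketch of that
 * registration, done elsewhere in the driver (lpfc_init.c), looks roughly
 * like:
 *
 *   lpfc_transport_template =
 *           fc_attach_transport(&lpfc_transport_functions);
 *   lpfc_vport_transport_template =
 *           fc_attach_transport(&lpfc_vport_transport_functions);
 *
 * The transport then creates the fc_host/fc_remote_ports sysfs attributes
 * advertised by the show_* flags and routes writes to the callbacks set here.
 */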
/**
 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE Mode
 * @phba: lpfc_hba pointer.
 **/
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
        /* If the adapter supports FCoE mode */
        switch (phba->pcidev->device) {
        case PCI_DEVICE_ID_SKYHAWK:
        case PCI_DEVICE_ID_SKYHAWK_VF:
        case PCI_DEVICE_ID_LANCER_FCOE:
        case PCI_DEVICE_ID_LANCER_FCOE_VF:
        case PCI_DEVICE_ID_ZEPHYR_DCSP:
        case PCI_DEVICE_ID_HORNET:
        case PCI_DEVICE_ID_TIGERSHARK:
        case PCI_DEVICE_ID_TOMCAT:
                phba->hba_flag |= HBA_FCOE_MODE;
                break;
        default:
                /* for others, clear the flag */
                phba->hba_flag &= ~HBA_FCOE_MODE;
        }
}

/**
 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
 * @phba: lpfc_hba pointer.
 **/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
        lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
        lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
        lpfc_ns_query_init(phba, lpfc_ns_query);
        lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
        lpfc_cr_delay_init(phba, lpfc_cr_delay);
        lpfc_cr_count_init(phba, lpfc_cr_count);
        lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
        lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
        lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
        lpfc_ack0_init(phba, lpfc_ack0);
        lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
        lpfc_topology_init(phba, lpfc_topology);
        lpfc_link_speed_init(phba, lpfc_link_speed);
        lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
        lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
        lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
        lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
        lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
        lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
        lpfc_force_rscn_init(phba, lpfc_force_rscn);
        lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
        lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
        lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
        lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
        lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);

        lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
        if (phba->sli_rev != LPFC_SLI_REV4)
                phba->cfg_EnableXLane = 0;
        lpfc_XLanePriority_init(phba, lpfc_XLanePriority);

        memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
        memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
        phba->cfg_oas_lun_state = 0;
        phba->cfg_oas_lun_status = 0;
        phba->cfg_oas_flags = 0;
        phba->cfg_oas_priority = 0;
        lpfc_enable_bg_init(phba, lpfc_enable_bg);
        lpfc_prot_mask_init(phba, lpfc_prot_mask);
        lpfc_prot_guard_init(phba, lpfc_prot_guard);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->cfg_poll = 0;
        else
                phba->cfg_poll = lpfc_poll;

        /* Get the function mode */
        lpfc_get_hba_function_mode(phba);

        /* BlockGuard allowed for FC only. */
        if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0581 BlockGuard feature not supported\n");
                /* If set, clear the BlockGuard support param */
                phba->cfg_enable_bg = 0;
        } else if (phba->cfg_enable_bg) {
                phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
        }

        lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);

        lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
        lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
        lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);

        /* Initialize first burst. Target vs Initiator are different. */
        lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
        lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
        lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
        lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
        lpfc_irq_chann_init(phba, lpfc_irq_chann);
        lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
        lpfc_enable_dpp_init(phba, lpfc_enable_dpp);

        if (phba->sli_rev != LPFC_SLI_REV4) {
                /* NVME only supported on SLI4 */
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = 0;
                phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
                phba->cfg_enable_bbcr = 0;
                phba->cfg_xri_rebalancing = 0;
        } else {
                /* We MUST have FCP support */
                if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                        phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
        }

        phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;

        phba->cfg_enable_pbde = 0;

        /* A value of 0 means use the number of CPUs found in the system */
        if (phba->cfg_hdw_queue == 0)
                phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
        if (phba->cfg_irq_chann == 0)
                phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
        if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
                phba->cfg_irq_chann = phba->cfg_hdw_queue;

        phba->cfg_soft_wwnn = 0L;
        phba->cfg_soft_wwpn = 0L;
        lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
        lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
        lpfc_aer_support_init(phba, lpfc_aer_support);
        lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
        lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
        lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
        lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
        lpfc_sli_mode_init(phba, lpfc_sli_mode);
        lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
        lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
        lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
        lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);

        return;
}
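/*
 * For illustration only (not compiled): each lpfc_<param>_init() call above
 * copies a module-parameter value, range-checked by the driver's lpfc_param
 * init helpers, into the matching phba->cfg_<param> field.  A minimal sketch
 * of overriding a few of them at load time, with values chosen purely for
 * illustration:
 *
 *   # modprobe lpfc lpfc_log_verbose=0x10 lpfc_hdw_queue=8 lpfc_enable_bg=1
 */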
/**
 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
 * dependencies between protocols and roles.
 * @phba: lpfc_hba pointer.
 **/
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
        if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
                phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
        if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
                phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
        if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
                phba->cfg_irq_chann = phba->cfg_hdw_queue;

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
            phba->nvmet_support) {
                phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                                "6013 %s x%x fb_size x%x, fb_max x%x\n",
                                "NVME Target PRLI ACC enable_fb ",
                                phba->cfg_nvme_enable_fb,
                                phba->cfg_nvmet_fb_size,
                                LPFC_NVMET_FB_SZ_MAX);

                if (phba->cfg_nvme_enable_fb == 0)
                        phba->cfg_nvmet_fb_size = 0;
                else {
                        if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
                                phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
                }

                if (!phba->cfg_nvmet_mrq)
                        phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;

                /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
                if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
                        phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                        "6018 Adjust lpfc_nvmet_mrq to %d\n",
                                        phba->cfg_nvmet_mrq);
                }
                if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
                        phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

        } else {
                /* Not NVME Target mode. Turn off Target parameters. */
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = 0;
                phba->cfg_nvmet_fb_size = 0;
        }
}

/**
 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
 * @vport: lpfc_vport pointer.
 **/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
        lpfc_log_verbose_init(vport, lpfc_log_verbose);
        lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
        lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
        lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
        lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
        lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
        lpfc_restrict_login_init(vport, lpfc_restrict_login);
        lpfc_fcp_class_init(vport, lpfc_fcp_class);
        lpfc_use_adisc_init(vport, lpfc_use_adisc);
        lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
        lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
        lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
        lpfc_max_luns_init(vport, lpfc_max_luns);
        lpfc_scan_down_init(vport, lpfc_scan_down);
        lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
        return;
}
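/*
 * For illustration only (not compiled): lpfc_get_vport_cfgparam() is the
 * per-vport counterpart of lpfc_get_cfgparam() above; the driver invokes it
 * while creating a port (physical or NPIV virtual) so each lpfc_vport starts
 * from the current module-parameter defaults before any per-port sysfs
 * overrides are applied.
 */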