/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <scsi/scsi_host.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

/* Enable lpfc debugfs support whenever generic debugfs is configured */
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
#endif

struct lpfc_sli2_slim;

#define ELX_MODEL_NAME_SIZE	80

/* PCI device groups: legacy (SLI-2/3) vs OneConnect (SLI-4) parts */
#define LPFC_PCI_DEV_LP		0x1
#define LPFC_PCI_DEV_OC		0x2

/* Supported SLI interface revisions */
#define LPFC_SLI_REV2		2
#define LPFC_SLI_REV3		3
#define LPFC_SLI_REV4		4

#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
					   requests */
#define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
					   the NameServer before giving up. */
#define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */
#define LPFC_DEFAULT_MENLO_SG_SEG_CNT	128	/* sg element count per scsi
		cmnd for menlo needs nearly twice as for firmware
		downloads using bsg */

#define LPFC_DEFAULT_XPSGL_SIZE	256
#define LPFC_MAX_SG_TABLESIZE	0xffff
#define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
#define LPFC_MAX_NVME_SEG_CNT	256	/* max SGL element cnt per NVME cmnd */

#define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL	120	/* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
#define LPFC_MIN_TGT_QDEPTH	10
#define LPFC_MAX_TGT_QDEPTH	0xFFFF

#define LPFC_MAX_BUCKET_COUNT	20	/* Maximum no. of buckets for stat data
					   collection. */
/*
 * Following time intervals are used for adjusting SCSI device
 * queue depths when there are driver resource error or Firmware
 * resource error.
 */
/* 1 Second */
#define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))

/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20

#define LPFC_HB_MBOX_INTERVAL	5	/* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT	30	/* Heart beat timeout in seconds. */

/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */

/* Define macros for 64 bit support */
#define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
/* NOTE: the high half is shifted by 16 twice rather than by 32 once;
 * behavior is identical for the u64 operand.
 */
#define getPaddr(high, low)  ((dma_addr_t)( \
			     (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
/* Provide maximum configuration definitions. */
#define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
#define FC_MAX_ADPTMSG		64

#define MAX_HBAEVT	32
#define MAX_HBAS_NO_RESET 16

/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS	2

/* lpfc wait event data ready flag */
#define LPFC_DATA_READY		0	/* bit 0 */

/* queue dump line buffer size */
#define LPFC_LBUF_SZ		128

/* mailbox system shutdown options */
#define LPFC_MBX_NO_WAIT	0
#define LPFC_MBX_WAIT		1

enum lpfc_polling_flags {
	ENABLE_FCP_RING_POLLING = 0x1,
	DISABLE_FCP_RING_INT    = 0x2
};

/* Per-CPU counters used when profiling fast-path cmd/rsp distribution */
struct perf_prof {
	uint16_t cmd_cpu[40];
	uint16_t rsp_cpu[40];
	uint16_t qh_cpu[40];
	uint16_t wqidx[40];
};

/*
 * Provide for FC4 TYPE x28 - NVME.  The
 * bit mask for FCP and NVME is 0x8 identically
 * because they are 32 bit positions distance.
 */
#define LPFC_FC4_TYPE_BITMASK	0x00000100

/* Provide DMA memory definitions the driver uses per port instance. */
struct lpfc_dmabuf {
	struct list_head list;
	void *virt;		/* virtual address ptr */
	dma_addr_t phys;	/* mapped address */
	uint32_t buffer_tag;	/* used for tagged queue ring */
};

/* Context buffer tracking an NVMET async exchange and its resources */
struct lpfc_nvmet_ctxbuf {
	struct list_head list;
	struct lpfc_async_xchg_ctx *context;
	struct lpfc_iocbq *iocbq;
	struct lpfc_sglq *sglq;
	struct work_struct defer_work;
};

/* Simple pre-allocated pool of DMA buffers */
struct lpfc_dma_pool {
	struct lpfc_dmabuf   *elements;
	uint32_t    max_count;
	uint32_t    current_count;
};

/* Header/data buffer pair posted to a Host Buffer Queue (HBQ) */
struct hbq_dmabuf {
	struct lpfc_dmabuf hbuf;
	struct lpfc_dmabuf dbuf;
	uint16_t total_size;
	uint16_t bytes_recv;
	uint32_t tag;
	struct lpfc_cq_event cq_event;
	unsigned long time_stamp;
	void *context;
};

/* Header/data buffer pair posted to SLI-4 Receive Queues */
struct rqb_dmabuf {
	struct lpfc_dmabuf hbuf;
	struct lpfc_dmabuf dbuf;
	uint16_t total_size;
	uint16_t bytes_recv;
	uint16_t idx;
	struct lpfc_queue *hrq;	      /* ptr to associated Header RQ */
	struct lpfc_queue *drq;	      /* ptr to associated Data RQ */
};

/* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
#define MEM_PRI		0x100


/****************************************************************************/
/* Device VPD save area                                                     */
/****************************************************************************/
typedef struct lpfc_vpd {
	uint32_t status;	/* vpd status value */
	uint32_t length;	/* number of bytes actually returned */
	struct {
		uint32_t rsvd1;	/* Revision numbers */
		uint32_t biuRev;
		uint32_t smRev;
		uint32_t smFwRev;
		uint32_t endecRev;
		uint16_t rBit;
		uint8_t fcphHigh;
		uint8_t fcphLow;
		uint8_t feaLevelHigh;
		uint8_t feaLevelLow;
		uint32_t postKernRev;
		uint32_t opFwRev;
		uint8_t opFwName[16];
		uint32_t sli1FwRev;
		uint8_t sli1FwName[16];
		uint32_t sli2FwRev;
		uint8_t sli2FwName[16];
	} rev;
	/* SLI-3 feature bits; layout mirrored for both endiannesses */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint32_t rsvd3  :20;	/* Reserved */
		uint32_t rsvd2	: 3;	/* Reserved */
		uint32_t cbg	: 1;	/* Configure BlockGuard */
		uint32_t cmv	: 1;	/* Configure Max VPIs */
		uint32_t ccrp   : 1;	/* Config Command Ring Polling */
		uint32_t csah   : 1;	/* Configure Synchronous Abort Handling */
		uint32_t chbs   : 1;	/* Configure Host Backing store */
		uint32_t cinb   : 1;	/* Enable Interrupt Notification Block */
		uint32_t cerbm	: 1;	/* Configure Enhanced Receive Buf Mgmt */
		uint32_t cmx	: 1;	/* Configure Max XRIs */
		uint32_t cmr	: 1;	/* Configure Max RPIs */
#else	/*  __LITTLE_ENDIAN */
		uint32_t cmr	: 1;	/* Configure Max RPIs */
		uint32_t cmx	: 1;	/* Configure Max XRIs */
		uint32_t cerbm	: 1;	/* Configure Enhanced Receive Buf Mgmt */
		uint32_t cinb   : 1;	/* Enable Interrupt Notification Block */
		uint32_t chbs   : 1;	/* Configure Host Backing store */
		uint32_t csah   : 1;	/* Configure Synchronous Abort Handling */
		uint32_t ccrp   : 1;	/* Config Command Ring Polling */
		uint32_t cmv	: 1;	/* Configure Max VPIs */
		uint32_t cbg	: 1;	/* Configure BlockGuard */
		uint32_t rsvd2	: 3;	/* Reserved */
		uint32_t rsvd3  :20;	/* Reserved */
#endif
	} sli3Feat;
} lpfc_vpd_t;


/*
 * lpfc stat counters
 */
struct lpfc_stats {
	/* Statistics for ELS commands */
	uint32_t elsLogiCol;
	uint32_t elsRetryExceeded;
	uint32_t elsXmitRetry;
	uint32_t elsDelayRetry;
	uint32_t elsRcvDrop;
	uint32_t elsRcvFrame;
	uint32_t elsRcvRSCN;
	uint32_t elsRcvRNID;
	uint32_t elsRcvFARP;
	uint32_t elsRcvFARPR;
	uint32_t elsRcvFLOGI;
	uint32_t elsRcvPLOGI;
	uint32_t elsRcvADISC;
	uint32_t elsRcvPDISC;
	uint32_t elsRcvFAN;
	uint32_t elsRcvLOGO;
	uint32_t elsRcvPRLO;
	uint32_t elsRcvPRLI;
	uint32_t elsRcvLIRR;
	uint32_t elsRcvRLS;
	uint32_t elsRcvRPL;
	uint32_t elsRcvRRQ;
	uint32_t elsRcvRTV;
	uint32_t elsRcvECHO;
	uint32_t elsRcvLCB;
	uint32_t elsRcvRDP;
	uint32_t elsRcvRDF;
	uint32_t elsXmitFLOGI;
	uint32_t elsXmitFDISC;
	uint32_t elsXmitPLOGI;
	uint32_t elsXmitPRLI;
	uint32_t elsXmitADISC;
	uint32_t elsXmitLOGO;
	uint32_t elsXmitSCR;
	uint32_t elsXmitRSCN;
	uint32_t elsXmitRNID;
	uint32_t elsXmitFARP;
	uint32_t elsXmitFARPR;
	uint32_t elsXmitACC;
	uint32_t elsXmitLSRJT;

	/* Statistics for frame receive/transmit and FCP/link events */
	uint32_t frameRcvBcast;
	uint32_t frameRcvMulti;
	uint32_t strayXmitCmpl;
	uint32_t frameXmitDelay;
	uint32_t xriCmdCmpl;
	uint32_t xriStatErr;
	uint32_t LinkUp;
	uint32_t LinkDown;
	uint32_t LinkMultiEvent;
	uint32_t NoRcvBuf;
	uint32_t fcpCmd;
	uint32_t fcpCmpl;
	uint32_t fcpRspErr;
	uint32_t fcpRemoteStop;
	uint32_t fcpPortRjt;
	uint32_t fcpPortBusy;
	uint32_t fcpError;
	uint32_t fcpLocalErr;
};

struct lpfc_hba;


#define LPFC_VMID_TIMER   300	/* timer interval in seconds */

#define LPFC_MAX_VMID_SIZE      256
#define LPFC_COMPRESS_VMID_SIZE 16

/* Tag carried by a VMID-classified I/O: either an application id
 * (app-header mode) or a CS_CTL priority tag (priority-tag mode).
 */
union lpfc_vmid_io_tag {
	u32 app_id;	/* App Id vmid */
	u8 cs_ctl_vmid;	/* Priority tag vmid */
};

#define JIFFIES_PER_HR	(HZ * 60 * 60)

/* One VM identity (VMID) slot tracked per vport */
struct lpfc_vmid {
	u8 flag;
#define LPFC_VMID_SLOT_FREE     0x0
#define LPFC_VMID_SLOT_USED     0x1
#define LPFC_VMID_REQ_REGISTER  0x2
#define LPFC_VMID_REGISTERED    0x4
#define LPFC_VMID_DE_REGISTER   0x8
	char host_vmid[LPFC_MAX_VMID_SIZE];
	union lpfc_vmid_io_tag un;
	struct hlist_node hnode;
	u64 io_rd_cnt;
	u64 io_wr_cnt;
	u8 vmid_len;
	u8 delete_inactive;	/* Delete if inactive flag 0 = no, 1 = yes */
	u32 hash_index;
	u64 __percpu *last_io_time;
};

#define lpfc_vmid_is_type_priority_tag(vport)\
	(vport->vmid_priority_tagging ? 1 : 0)

#define LPFC_VMID_HASH_SIZE     256
#define LPFC_VMID_HASH_MASK     255
#define LPFC_VMID_HASH_SHIFT    6

struct lpfc_vmid_context {
	struct lpfc_vmid *vmp;
	struct lpfc_nodelist *nlp;
	bool instantiated;
};

/* One QoS priority range descriptor returned by the fabric */
struct lpfc_vmid_priority_range {
	u8 low;
	u8 high;
	u8 qos;
};

struct lpfc_vmid_priority_info {
	u32 num_descriptors;
	struct lpfc_vmid_priority_range *vmid_range;
};

#define QFPA_EVEN_ONLY 0x01
#define QFPA_ODD_ONLY  0x02
#define QFPA_EVEN_ODD  0x03

/* Per-vport discovery state machine states */
enum discovery_state {
	LPFC_VPORT_UNKNOWN     =  0,    /* vport state is unknown */
	LPFC_VPORT_FAILED      =  1,    /* vport has failed */
	LPFC_LOCAL_CFG_LINK    =  6,    /* local NPORT Id configured */
	LPFC_FLOGI             =  7,    /* FLOGI sent to Fabric */
	LPFC_FDISC             =  8,    /* FDISC sent for vport */
	LPFC_FABRIC_CFG_LINK   =  9,    /* Fabric assigned NPORT Id
					 * configured */
	LPFC_NS_REG            =  10,   /* Register with NameServer */
	LPFC_NS_QRY            =  11,   /* Query NameServer for NPort ID list */
	LPFC_BUILD_DISC_LIST   =  12,   /* Build ADISC and PLOGI lists for
					 * device authentication / discovery */
	LPFC_DISC_AUTH         =  13,   /* Processing ADISC list */
	LPFC_VPORT_READY       =  32,
};

/* Adapter/link bring-up state machine states */
enum hba_state {
	LPFC_LINK_UNKNOWN    =   0,    /* HBA state is unknown */
	LPFC_WARM_START      =   1,    /* HBA state after selective reset */
	LPFC_INIT_START      =   2,    /* Initial state after board reset */
	LPFC_INIT_MBX_CMDS   =   3,    /* Initialize HBA with mbox commands */
	LPFC_LINK_DOWN       =   4,    /* HBA initialized, link is down */
	LPFC_LINK_UP         =   5,    /* Link is up  - issue READ_LA */
	LPFC_CLEAR_LA        =   6,    /* authentication cmplt - issue
					* CLEAR_LA */
	LPFC_HBA_READY       =  32,
	LPFC_HBA_ERROR       =  -1
};

struct lpfc_trunk_link_state {
	enum hba_state state;
	uint8_t fault;
};

/* Per-lane state when FC trunking (link aggregation) is active */
struct lpfc_trunk_link {
	struct lpfc_trunk_link_state link0,
				     link1,
				     link2,
				     link3;
};

/* Per-port (physical or NPIV virtual) instance data */
struct lpfc_vport {
	struct lpfc_hba *phba;
	struct list_head listentry;
	uint8_t port_type;
#define LPFC_PHYSICAL_PORT  1
#define LPFC_NPIV_PORT      2
#define LPFC_FABRIC_PORT    3
	enum discovery_state port_state;

	uint16_t vpi;
	uint16_t vfi;
	uint8_t vpi_state;
#define LPFC_VPI_REGISTERED	0x1

	uint32_t fc_flag;	/* FC flags */
/* Several of these flags are HBA centric and should be moved to
 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
 */
#define FC_PT2PT                0x1	 /* pt2pt with no fabric */
#define FC_PT2PT_PLOGI          0x2	 /* pt2pt initiate PLOGI */
#define FC_DISC_TMO             0x4	 /* Discovery timer running */
#define FC_PUBLIC_LOOP          0x8	 /* Public loop */
#define FC_LBIT                 0x10	 /* LOGIN bit in loopinit set */
#define FC_RSCN_MODE            0x20	 /* RSCN cmd rcv'ed */
#define FC_NLP_MORE             0x40	 /* More node to process in node tbl */
#define FC_OFFLINE_MODE         0x80	 /* Interface is offline for diag */
#define FC_FABRIC               0x100	 /* We are fabric attached */
#define FC_VPORT_LOGO_RCVD      0x200    /* LOGO received on vport */
#define FC_RSCN_DISCOVERY       0x400	 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG   0x800    /* FDISC on phys port detect DID chng*/
#define FC_SCSI_SCAN_TMO        0x4000	 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY      0x8000	 /* we want to abort discovery */
#define FC_NDISC_ACTIVE         0x10000	 /* NPort discovery active */
#define FC_BYPASSED_MODE        0x20000	 /* NPort is in bypassed mode */
#define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
#define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL	 */
#define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
#define FC_DISC_DELAYED		0x2000000/* Delay NPort discovery */

	uint32_t ct_flags;
#define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
#define FC_CT_RNN_ID		0x2	 /* RNN_ID accepted by switch */
#define FC_CT_RSNN_NN		0x4	 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */

	struct list_head fc_nodes;

	/* Keep counters for the number of entries in each list. */
	uint16_t fc_plogi_cnt;
	uint16_t fc_adisc_cnt;
	uint16_t fc_reglogin_cnt;
	uint16_t fc_prli_cnt;
	uint16_t fc_unmap_cnt;
	uint16_t fc_map_cnt;
	uint16_t fc_npr_cnt;
	uint16_t fc_unused_cnt;
	struct serv_parm fc_sparam;	/* buffer for our service parameters */

	uint32_t fc_myDID;	/* fibre channel S_ID */
	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
	struct lpfc_name fabric_portname;
	struct lpfc_name fabric_nodename;

	int32_t stopped;   /* HBA has not been restarted since last ERATT */
	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */

	uint32_t num_disc_nodes;	/* in addition to hba_state */
	uint32_t gidft_inp;	/* cnt of outstanding GID_FTs */

	uint32_t fc_nlp_cnt;	/* outstanding NODELIST requests */
	uint32_t fc_rscn_id_cnt;	/* count of RSCNs payloads in list */
	uint32_t fc_rscn_flush;		/* flag use of fc_rscn_id_list */
	struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
	struct lpfc_name fc_nodename;	/* fc nodename */
	struct lpfc_name fc_portname;	/* fc portname */

	struct lpfc_work_evt disc_timeout_evt;

	struct timer_list fc_disctmo;	/* Discovery rescue timer */
	uint8_t fc_ns_retry;	/* retries for fabric nameserver */
	uint32_t fc_prli_sent;	/* cntr for outstanding PRLIs */

	spinlock_t work_port_lock;
	uint32_t work_port_events; /* Timeout to be handled */
#define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
#define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
#define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */

#define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
#define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
#define WORKER_RAMP_DOWN_QUEUE         0x800	/* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE           0x1000	/* hba: Increase Q depth */
#define WORKER_SERVICE_TXQ             0x2000	/* hba: IOCBs on the txq */
#define WORKER_CHECK_INACTIVE_VMID     0x4000	/* hba: check inactive vmids */
#define WORKER_CHECK_VMID_ISSUE_QFPA   0x8000	/* vport: Check if qfpa needs
						 * to be issued */

	struct timer_list els_tmofunc;
	struct timer_list delayed_disc_tmo;

	int unreg_vpi_cmpl;

	uint8_t load_flag;
#define FC_LOADING		0x1	/* HBA in process of loading drvr */
#define FC_UNLOADING		0x2	/* HBA in process of unloading drvr */
#define FC_ALLOW_FDMI		0x4	/* port is ready for FDMI requests */
#define FC_ALLOW_VMID		0x8	/* Allow VMID I/Os */
#define FC_DEREGISTER_ALL_APP_ID	0x10	/* Deregister all VMIDs */
	/* Vport Config Parameters */
	uint32_t cfg_scan_down;
	uint32_t cfg_lun_queue_depth;
	uint32_t cfg_nodev_tmo;
	uint32_t cfg_devloss_tmo;
	uint32_t cfg_restrict_login;
	uint32_t cfg_peer_port_login;
	uint32_t cfg_fcp_class;
	uint32_t cfg_use_adisc;
	uint32_t cfg_discovery_threads;
	uint32_t cfg_log_verbose;
	uint32_t cfg_enable_fc4_type;
	uint32_t cfg_max_luns;
	uint32_t cfg_enable_da_id;
	uint32_t cfg_max_scsicmpl_time;
	uint32_t cfg_tgt_queue_depth;
	uint32_t cfg_first_burst_size;
	uint32_t dev_loss_tmo_changed;
	/* VMID parameters */
	u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
	u32 max_vmid;	/* maximum VMIDs allowed per port */
	u32 cur_vmid_cnt;	/* Current VMID count */
#define LPFC_MIN_VMID	4
#define LPFC_MAX_VMID	255
	u32 vmid_inactivity_timeout;	/* Time after which the VMID */
					/* deregisters from switch */
	u32 vmid_priority_tagging;
#define LPFC_VMID_PRIO_TAG_DISABLE	0 /* Disable */
#define LPFC_VMID_PRIO_TAG_SUP_TARGETS	1 /* Allow supported targets only */
#define LPFC_VMID_PRIO_TAG_ALL_TARGETS	2 /* Allow all targets */
	unsigned long *vmid_priority_range;
#define LPFC_VMID_MAX_PRIORITY_RANGE    256
#define LPFC_VMID_PRIORITY_BITMAP_SIZE  32
	u8 vmid_flag;
#define LPFC_VMID_IN_USE		0x1
#define LPFC_VMID_ISSUE_QFPA		0x2
#define LPFC_VMID_QFPA_CMPL		0x4
#define LPFC_VMID_QOS_ENABLED		0x8
#define LPFC_VMID_TIMER_ENBLD		0x10
	struct fc_qfpa_res *qfpa_res;

	struct fc_vport *fc_vport;

	struct lpfc_vmid *vmid;
	DECLARE_HASHTABLE(hash_table, 8);
	rwlock_t vmid_lock;
	struct lpfc_vmid_priority_info vmid_priority;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct dentry *debug_disc_trc;
	struct dentry *debug_nodelist;
	struct dentry *debug_nvmestat;
	struct dentry *debug_scsistat;
	struct dentry *debug_ioktime;
	struct dentry *debug_hdwqstat;
	struct dentry *vport_debugfs_root;
	struct lpfc_debugfs_trc *disc_trc;
	atomic_t disc_trc_cnt;
#endif
	uint8_t stat_data_enabled;
	uint8_t stat_data_blocked;
	struct list_head rcv_buffer_list;
	unsigned long rcv_buffer_time_stamp;
	uint32_t vport_flag;
#define STATIC_VPORT		1
#define FAWWPN_SET		2
#define FAWWPN_PARAM_CHG	4

	uint16_t fdmi_num_disc;
	uint32_t fdmi_hba_mask;
	uint32_t fdmi_port_mask;

	/* There is a single nvme instance per vport. */
	struct nvme_fc_local_port *localport;
	uint8_t  nvmei_support; /* driver supports NVME Initiator */
	uint32_t last_fcp_wqidx;
	uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};

/* One Host Buffer Queue instance (SLI-3) */
struct hbq_s {
	uint16_t entry_count;	  /* Current number of HBQ slots */
	uint16_t buffer_count;	  /* Current number of buffers posted */
	uint32_t next_hbqPutIdx;  /* Index to next HBQ slot to use */
	uint32_t hbqPutIdx;	  /* HBQ slot to use */
	uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
	void    *hbq_virt;	  /* Virtual ptr to this hbq */
	struct list_head hbq_buffer_list;  /* buffers assigned to this HBQ */
			      /* Callback for HBQ buffer allocation */
	struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
			      /* Callback for HBQ buffer free */
	void               (*hbq_free_buffer) (struct lpfc_hba *,
					       struct hbq_dmabuf *);
};

/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ	0
#define LPFC_MAX_HBQS	1

enum hba_temp_state {
	HBA_NORMAL_TEMP,
	HBA_OVER_TEMP
};

enum intr_type_t {
	NONE = 0,
	INTx,
	MSI,
	MSIX,
};

#define LPFC_CT_CTX_MAX		64
/* Saved context for an unsolicited CT exchange awaiting a response */
struct unsol_rcv_ct_ctx {
	uint32_t ctxt_id;
	uint32_t SID;
	uint32_t valid;
#define UNSOL_INVALID		0
#define UNSOL_VALID		1
	uint16_t oxid;
	uint16_t rxid;
};

#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
#define LPFC_USER_LINK_SPEED_32G	32	/* 32 Gigabaud */
#define LPFC_USER_LINK_SPEED_64G	64	/* 64 Gigabaud */
#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_64G

#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64"

enum nemb_type {
	nemb_mse = 1,
	nemb_hbd
};

enum mbox_type {
	mbox_rd = 1,
	mbox_wr
};

enum dma_type {
	dma_mbox = 1,
	dma_ebuf
};

enum sta_type {
	sta_pre_addr = 1,
	sta_pos_addr
};

/* Tracks a multi-buffer (non-embedded) BSG mailbox pass-through */
struct lpfc_mbox_ext_buf_ctx {
	uint32_t state;
#define LPFC_BSG_MBOX_IDLE		0
#define LPFC_BSG_MBOX_HOST              1
#define LPFC_BSG_MBOX_PORT		2
#define LPFC_BSG_MBOX_DONE		3
#define LPFC_BSG_MBOX_ABTS		4
	enum nemb_type nembType;
	enum mbox_type mboxType;
	uint32_t numBuf;
	uint32_t mbxTag;
	uint32_t seqNum;
	struct lpfc_dmabuf *mbx_dmabuf;
	struct list_head ext_dmabuf_list;
};

struct lpfc_epd_pool {
	/* Expedite pool */
	struct list_head list;
	u32 count;
	spinlock_t lock;	/* lock for expedite pool */
};

enum ras_state {
	INACTIVE,
	REG_INPROGRESS,
	ACTIVE
};

/* RAS (firmware log) support state and buffers */
struct lpfc_ras_fwlog {
	uint8_t *fwlog_buff;
	uint32_t fw_buffcount; /* Buffer size posted to FW */
#define LPFC_RAS_BUFF_ENTERIES  16      /* Each entry can hold max of 64k */
#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
	uint32_t fw_loglevel; /* Log level set */
	struct lpfc_dmabuf lwpd;
	struct list_head fwlog_buff_list;

	/* RAS support status on adapter */
	bool ras_hwsupport; /* RAS Support available on HW or not */
	bool ras_enabled;   /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
	enum ras_state state;    /* RAS logging running state */
};

#define DBG_LOG_STR_SZ 256
#define DBG_LOG_SZ 256

/* In-memory debug log ring entry */
struct dbg_log_ent {
	char log[DBG_LOG_STR_SZ];
	u64     t_ns;
};

enum lpfc_irq_chann_mode {
	/* Assign IRQs to all possible cpus that have hardware queues */
	NORMAL_MODE,

	/* Assign IRQs only to cpus on the same numa node as HBA */
	NUMA_MODE,

	/* Assign IRQs only on non-hyperthreaded CPUs.
This is the 737 * same as normal_mode, but assign IRQS only on physical CPUs. 738 */ 739 NHT_MODE, 740 }; 741 742 struct lpfc_hba { 743 /* SCSI interface function jump table entries */ 744 struct lpfc_io_buf * (*lpfc_get_scsi_buf) 745 (struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 746 struct scsi_cmnd *cmnd); 747 int (*lpfc_scsi_prep_dma_buf) 748 (struct lpfc_hba *, struct lpfc_io_buf *); 749 void (*lpfc_scsi_unprep_dma_buf) 750 (struct lpfc_hba *, struct lpfc_io_buf *); 751 void (*lpfc_release_scsi_buf) 752 (struct lpfc_hba *, struct lpfc_io_buf *); 753 void (*lpfc_rampdown_queue_depth) 754 (struct lpfc_hba *); 755 void (*lpfc_scsi_prep_cmnd) 756 (struct lpfc_vport *, struct lpfc_io_buf *, 757 struct lpfc_nodelist *); 758 int (*lpfc_scsi_prep_cmnd_buf) 759 (struct lpfc_vport *vport, 760 struct lpfc_io_buf *lpfc_cmd, 761 uint8_t tmo); 762 763 /* IOCB interface function jump table entries */ 764 int (*__lpfc_sli_issue_iocb) 765 (struct lpfc_hba *, uint32_t, 766 struct lpfc_iocbq *, uint32_t); 767 int (*__lpfc_sli_issue_fcp_io) 768 (struct lpfc_hba *phba, uint32_t ring_number, 769 struct lpfc_iocbq *piocb, uint32_t flag); 770 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, 771 struct lpfc_iocbq *); 772 int (*lpfc_hba_down_post)(struct lpfc_hba *phba); 773 IOCB_t * (*lpfc_get_iocb_from_iocbq) 774 (struct lpfc_iocbq *); 775 void (*lpfc_scsi_cmd_iocb_cmpl) 776 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); 777 778 /* MBOX interface function jump table entries */ 779 int (*lpfc_sli_issue_mbox) 780 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 781 782 /* Slow-path IOCB process function jump table entries */ 783 void (*lpfc_sli_handle_slow_ring_event) 784 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 785 uint32_t mask); 786 787 /* INIT device interface function jump table entries */ 788 int (*lpfc_sli_hbq_to_firmware) 789 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); 790 int (*lpfc_sli_brdrestart) 791 (struct lpfc_hba *); 792 int 
(*lpfc_sli_brdready) 793 (struct lpfc_hba *, uint32_t); 794 void (*lpfc_handle_eratt) 795 (struct lpfc_hba *); 796 void (*lpfc_stop_port) 797 (struct lpfc_hba *); 798 int (*lpfc_hba_init_link) 799 (struct lpfc_hba *, uint32_t); 800 int (*lpfc_hba_down_link) 801 (struct lpfc_hba *, uint32_t); 802 int (*lpfc_selective_reset) 803 (struct lpfc_hba *); 804 805 int (*lpfc_bg_scsi_prep_dma_buf) 806 (struct lpfc_hba *, struct lpfc_io_buf *); 807 /* Add new entries here */ 808 809 /* expedite pool */ 810 struct lpfc_epd_pool epd_pool; 811 812 /* SLI4 specific HBA data structure */ 813 struct lpfc_sli4_hba sli4_hba; 814 815 struct workqueue_struct *wq; 816 struct delayed_work eq_delay_work; 817 818 #define LPFC_IDLE_STAT_DELAY 1000 819 struct delayed_work idle_stat_delay_work; 820 821 struct lpfc_sli sli; 822 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ 823 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ 824 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 825 #define LPFC_SLI3_HBQ_ENABLED 0x01 826 #define LPFC_SLI3_NPIV_ENABLED 0x02 827 #define LPFC_SLI3_VPORT_TEARDOWN 0x04 828 #define LPFC_SLI3_CRP_ENABLED 0x08 829 #define LPFC_SLI3_BG_ENABLED 0x20 830 #define LPFC_SLI3_DSS_ENABLED 0x40 831 #define LPFC_SLI4_PERFH_ENABLED 0x80 832 #define LPFC_SLI4_PHWQ_ENABLED 0x100 833 uint32_t iocb_cmd_size; 834 uint32_t iocb_rsp_size; 835 836 struct lpfc_trunk_link trunk_link; 837 enum hba_state link_state; 838 uint32_t link_flag; /* link state flags */ 839 #define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */ 840 /* This flag is set while issuing */ 841 /* INIT_LINK mailbox command */ 842 #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 843 #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 844 #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ 845 #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ 846 #define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */ 847 848 uint32_t hba_flag; /* hba 
generic flags */ 849 #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 850 #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ 851 #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ 852 #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ 853 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ 854 #define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ 855 #define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ 856 #define ASYNC_EVENT 0x80 857 #define LINK_DISABLED 0x100 /* Link disabled by user */ 858 #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ 859 #define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ 860 #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ 861 #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ 862 #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 863 #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ 864 #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ 865 #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ 866 #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ 867 #define HBA_FORCED_LINK_SPEED 0x40000 /* 868 * Firmware supports Forced Link Speed 869 * capability 870 */ 871 #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ 872 #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ 873 #define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */ 874 #define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */ 875 #define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ 876 #define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ 877 878 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 879 struct lpfc_dmabuf slim2p; 880 881 MAILBOX_t *mbox; 882 uint32_t *mbox_ext; 883 struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx; 884 uint32_t 
ha_copy; 885 struct _PCB *pcb; 886 struct _IOCB *IOCBs; 887 888 struct lpfc_dmabuf hbqslimp; 889 890 uint16_t pci_cfg_value; 891 892 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 893 894 uint32_t fc_eventTag; /* event tag for link attention */ 895 uint32_t link_events; 896 897 /* These fields used to be binfo */ 898 uint32_t fc_pref_DID; /* preferred D_ID */ 899 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 900 uint32_t fc_edtovResol; /* E_D_TOV timer resolution */ 901 uint32_t fc_edtov; /* E_D_TOV timer value */ 902 uint32_t fc_arbtov; /* ARB_TOV timer value */ 903 uint32_t fc_ratov; /* R_A_TOV timer value */ 904 uint32_t fc_rttov; /* R_T_TOV timer value */ 905 uint32_t fc_altov; /* AL_TOV timer value */ 906 uint32_t fc_crtov; /* C_R_TOV timer value */ 907 908 struct serv_parm fc_fabparam; /* fabric service parameters buffer */ 909 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ 910 911 uint32_t lmt; 912 913 uint32_t fc_topology; /* link topology, from LINK INIT */ 914 uint32_t fc_topology_changed; /* link topology, from LINK INIT */ 915 916 struct lpfc_stats fc_stat; 917 918 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */ 919 uint32_t nport_event_cnt; /* timestamp for nlplist entry */ 920 921 uint8_t wwnn[8]; 922 uint8_t wwpn[8]; 923 uint32_t RandomData[7]; 924 uint8_t fcp_embed_io; 925 uint8_t nvmet_support; /* driver supports NVMET */ 926 #define LPFC_NVMET_MAX_PORTS 32 927 uint8_t mds_diags_support; 928 uint8_t bbcredit_support; 929 uint8_t enab_exp_wqcq_pages; 930 u8 nsler; /* Firmware supports FC-NVMe-2 SLER */ 931 932 /* HBA Config Parameters */ 933 uint32_t cfg_ack0; 934 uint32_t cfg_xri_rebalancing; 935 uint32_t cfg_xpsgl; 936 uint32_t cfg_enable_npiv; 937 uint32_t cfg_enable_rrq; 938 uint32_t cfg_topology; 939 uint32_t cfg_link_speed; 940 #define LPFC_FCF_FOV 1 /* Fast fcf failover */ 941 #define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ 942 uint32_t cfg_fcf_failover_policy; 943 uint32_t cfg_fcp_io_sched; 944 
uint32_t cfg_ns_query; 945 uint32_t cfg_fcp2_no_tgt_reset; 946 uint32_t cfg_cr_delay; 947 uint32_t cfg_cr_count; 948 uint32_t cfg_multi_ring_support; 949 uint32_t cfg_multi_ring_rctl; 950 uint32_t cfg_multi_ring_type; 951 uint32_t cfg_poll; 952 uint32_t cfg_poll_tmo; 953 uint32_t cfg_task_mgmt_tmo; 954 uint32_t cfg_use_msi; 955 uint32_t cfg_auto_imax; 956 uint32_t cfg_fcp_imax; 957 uint32_t cfg_force_rscn; 958 uint32_t cfg_cq_poll_threshold; 959 uint32_t cfg_cq_max_proc_limit; 960 uint32_t cfg_fcp_cpu_map; 961 uint32_t cfg_fcp_mq_threshold; 962 uint32_t cfg_hdw_queue; 963 uint32_t cfg_irq_chann; 964 uint32_t cfg_suppress_rsp; 965 uint32_t cfg_nvme_oas; 966 uint32_t cfg_nvme_embed_cmd; 967 uint32_t cfg_nvmet_mrq_post; 968 uint32_t cfg_nvmet_mrq; 969 uint32_t cfg_enable_nvmet; 970 uint32_t cfg_nvme_enable_fb; 971 uint32_t cfg_nvmet_fb_size; 972 uint32_t cfg_total_seg_cnt; 973 uint32_t cfg_sg_seg_cnt; 974 uint32_t cfg_nvme_seg_cnt; 975 uint32_t cfg_scsi_seg_cnt; 976 uint32_t cfg_sg_dma_buf_size; 977 uint64_t cfg_soft_wwnn; 978 uint64_t cfg_soft_wwpn; 979 uint32_t cfg_hba_queue_depth; 980 uint32_t cfg_enable_hba_reset; 981 uint32_t cfg_enable_hba_heartbeat; 982 uint32_t cfg_fof; 983 uint32_t cfg_EnableXLane; 984 uint8_t cfg_oas_tgt_wwpn[8]; 985 uint8_t cfg_oas_vpt_wwpn[8]; 986 uint32_t cfg_oas_lun_state; 987 #define OAS_LUN_ENABLE 1 988 #define OAS_LUN_DISABLE 0 989 uint32_t cfg_oas_lun_status; 990 #define OAS_LUN_STATUS_EXISTS 0x01 991 uint32_t cfg_oas_flags; 992 #define OAS_FIND_ANY_VPORT 0x01 993 #define OAS_FIND_ANY_TARGET 0x02 994 #define OAS_LUN_VALID 0x04 995 uint32_t cfg_oas_priority; 996 uint32_t cfg_XLanePriority; 997 uint32_t cfg_enable_bg; 998 uint32_t cfg_prot_mask; 999 uint32_t cfg_prot_guard; 1000 uint32_t cfg_hostmem_hgp; 1001 uint32_t cfg_log_verbose; 1002 uint32_t cfg_enable_fc4_type; 1003 uint32_t cfg_aer_support; 1004 uint32_t cfg_sriov_nr_virtfn; 1005 uint32_t cfg_request_firmware_upgrade; 1006 uint32_t cfg_suppress_link_up; 1007 uint32_t 
cfg_rrq_xri_bitmap_sz; 1008 u32 cfg_fcp_wait_abts_rsp; 1009 uint32_t cfg_delay_discovery; 1010 uint32_t cfg_sli_mode; 1011 #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 1012 #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 1013 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ 1014 uint32_t cfg_fdmi_on; 1015 #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ 1016 #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ 1017 uint32_t cfg_enable_SmartSAN; 1018 uint32_t cfg_enable_mds_diags; 1019 uint32_t cfg_ras_fwlog_level; 1020 uint32_t cfg_ras_fwlog_buffsize; 1021 uint32_t cfg_ras_fwlog_func; 1022 uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ 1023 uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ 1024 #define LPFC_ENABLE_FCP 1 1025 #define LPFC_ENABLE_NVME 2 1026 #define LPFC_ENABLE_BOTH 3 1027 uint32_t cfg_enable_pbde; 1028 uint32_t cfg_enable_mi; 1029 struct nvmet_fc_target_port *targetport; 1030 lpfc_vpd_t vpd; /* vital product data */ 1031 1032 u32 cfg_max_vmid; /* maximum VMIDs allowed per port */ 1033 u32 cfg_vmid_app_header; 1034 #define LPFC_VMID_APP_HEADER_DISABLE 0 1035 #define LPFC_VMID_APP_HEADER_ENABLE 1 1036 u32 cfg_vmid_priority_tagging; 1037 u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */ 1038 /* deregisters from switch */ 1039 struct pci_dev *pcidev; 1040 struct list_head work_list; 1041 uint32_t work_ha; /* Host Attention Bits for WT */ 1042 uint32_t work_ha_mask; /* HA Bits owned by WT */ 1043 uint32_t work_hs; /* HS stored in case of ERRAT */ 1044 uint32_t work_status[2]; /* Extra status from SLIM */ 1045 1046 wait_queue_head_t work_waitq; 1047 struct task_struct *worker_thread; 1048 unsigned long data_flags; 1049 uint32_t border_sge_num; 1050 1051 uint32_t hbq_in_use; /* HBQs in use flag */ 1052 uint32_t hbq_count; /* Count of configured HBQs */ 1053 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 1054 1055 atomic_t fcp_qidx; /* next FCP 
WQ (RR Policy) */ 1056 atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */ 1057 1058 phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ 1059 phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ 1060 phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */ 1061 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 1062 PCI BAR0 */ 1063 void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for 1064 PCI BAR2 */ 1065 1066 void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for 1067 PCI BAR0 with dual-ULP support */ 1068 void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for 1069 PCI BAR2 with dual-ULP support */ 1070 void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for 1071 PCI BAR4 with dual-ULP support */ 1072 #define PCI_64BIT_BAR0 0 1073 #define PCI_64BIT_BAR2 2 1074 #define PCI_64BIT_BAR4 4 1075 void __iomem *MBslimaddr; /* virtual address for mbox cmds */ 1076 void __iomem *HAregaddr; /* virtual address for host attn reg */ 1077 void __iomem *CAregaddr; /* virtual address for chip attn reg */ 1078 void __iomem *HSregaddr; /* virtual address for host status 1079 reg */ 1080 void __iomem *HCregaddr; /* virtual address for host ctl reg */ 1081 1082 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */ 1083 struct lpfc_pgp *port_gp; 1084 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */ 1085 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */ 1086 1087 int brd_no; /* FC board number */ 1088 char SerialNumber[32]; /* adapter Serial Number */ 1089 char OptionROMVersion[32]; /* adapter BIOS / Fcode version */ 1090 char BIOSVersion[16]; /* Boot BIOS version */ 1091 char ModelDesc[256]; /* Model Description */ 1092 char ModelName[80]; /* Model Name */ 1093 char ProgramType[256]; /* Program Type */ 1094 char Port[20]; /* Port No */ 1095 uint8_t vpd_flag; /* VPD data flag */ 1096 1097 #define VPD_MODEL_DESC 0x1 /* valid vpd model description */ 1098 
#define VPD_MODEL_NAME 0x2 /* valid vpd model name */ 1099 #define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */ 1100 #define VPD_PORT 0x8 /* valid vpd port data */ 1101 #define VPD_MASK 0xf /* mask for any vpd data */ 1102 1103 uint8_t soft_wwn_enable; 1104 1105 struct timer_list fcp_poll_timer; 1106 struct timer_list eratt_poll; 1107 uint32_t eratt_poll_interval; 1108 1109 uint64_t bg_guard_err_cnt; 1110 uint64_t bg_apptag_err_cnt; 1111 uint64_t bg_reftag_err_cnt; 1112 1113 /* fastpath list. */ 1114 spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ 1115 spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ 1116 struct list_head lpfc_scsi_buf_list_get; 1117 struct list_head lpfc_scsi_buf_list_put; 1118 uint32_t total_scsi_bufs; 1119 struct list_head lpfc_iocb_list; 1120 uint32_t total_iocbq_bufs; 1121 struct list_head active_rrq_list; 1122 spinlock_t hbalock; 1123 1124 /* dma_mem_pools */ 1125 struct dma_pool *lpfc_sg_dma_buf_pool; 1126 struct dma_pool *lpfc_mbuf_pool; 1127 struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */ 1128 struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */ 1129 struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ 1130 struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ 1131 struct dma_pool *lpfc_cmd_rsp_buf_pool; 1132 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 1133 1134 mempool_t *mbox_mem_pool; 1135 mempool_t *nlp_mem_pool; 1136 mempool_t *rrq_pool; 1137 mempool_t *active_rrq_pool; 1138 1139 struct fc_host_statistics link_stats; 1140 enum lpfc_irq_chann_mode irq_chann_mode; 1141 enum intr_type_t intr_type; 1142 uint32_t intr_mode; 1143 #define LPFC_INTR_ERROR 0xFFFFFFFF 1144 struct list_head port_list; 1145 spinlock_t port_list_lock; /* lock for port_list mutations */ 1146 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 1147 uint16_t max_vpi; /* Maximum virtual nports */ 1148 #define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */ 1149 
#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */ 1150 uint16_t max_vports; /* 1151 * For IOV HBAs max_vpi can change 1152 * after a reset. max_vports is max 1153 * number of vports present. This can 1154 * be greater than max_vpi. 1155 */ 1156 uint16_t vpi_base; 1157 uint16_t vfi_base; 1158 unsigned long *vpi_bmask; /* vpi allocation table */ 1159 uint16_t *vpi_ids; 1160 uint16_t vpi_count; 1161 struct list_head lpfc_vpi_blk_list; 1162 1163 /* Data structure used by fabric iocb scheduler */ 1164 struct list_head fabric_iocb_list; 1165 atomic_t fabric_iocb_count; 1166 struct timer_list fabric_block_timer; 1167 unsigned long bit_flags; 1168 #define FABRIC_COMANDS_BLOCKED 0 1169 atomic_t num_rsrc_err; 1170 atomic_t num_cmd_success; 1171 unsigned long last_rsrc_error_time; 1172 unsigned long last_ramp_down_time; 1173 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1174 struct dentry *hba_debugfs_root; 1175 atomic_t debugfs_vport_count; 1176 struct dentry *debug_multixri_pools; 1177 struct dentry *debug_hbqinfo; 1178 struct dentry *debug_dumpHostSlim; 1179 struct dentry *debug_dumpHBASlim; 1180 struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ 1181 struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ 1182 struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ 1183 struct dentry *debug_writeGuard; /* inject write guard_tag errors */ 1184 struct dentry *debug_writeApp; /* inject write app_tag errors */ 1185 struct dentry *debug_writeRef; /* inject write ref_tag errors */ 1186 struct dentry *debug_readGuard; /* inject read guard_tag errors */ 1187 struct dentry *debug_readApp; /* inject read app_tag errors */ 1188 struct dentry *debug_readRef; /* inject read ref_tag errors */ 1189 1190 struct dentry *debug_nvmeio_trc; 1191 struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; 1192 struct dentry *debug_hdwqinfo; 1193 #ifdef LPFC_HDWQ_LOCK_STAT 1194 struct dentry *debug_lockstat; 1195 #endif 1196 struct dentry *debug_ras_log; 1197 atomic_t 
nvmeio_trc_cnt; 1198 uint32_t nvmeio_trc_size; 1199 uint32_t nvmeio_trc_output_idx; 1200 1201 /* T10 DIF error injection */ 1202 uint32_t lpfc_injerr_wgrd_cnt; 1203 uint32_t lpfc_injerr_wapp_cnt; 1204 uint32_t lpfc_injerr_wref_cnt; 1205 uint32_t lpfc_injerr_rgrd_cnt; 1206 uint32_t lpfc_injerr_rapp_cnt; 1207 uint32_t lpfc_injerr_rref_cnt; 1208 uint32_t lpfc_injerr_nportid; 1209 struct lpfc_name lpfc_injerr_wwpn; 1210 sector_t lpfc_injerr_lba; 1211 #define LPFC_INJERR_LBA_OFF (sector_t)(-1) 1212 1213 struct dentry *debug_slow_ring_trc; 1214 struct lpfc_debugfs_trc *slow_ring_trc; 1215 atomic_t slow_ring_trc_cnt; 1216 /* iDiag debugfs sub-directory */ 1217 struct dentry *idiag_root; 1218 struct dentry *idiag_pci_cfg; 1219 struct dentry *idiag_bar_acc; 1220 struct dentry *idiag_que_info; 1221 struct dentry *idiag_que_acc; 1222 struct dentry *idiag_drb_acc; 1223 struct dentry *idiag_ctl_acc; 1224 struct dentry *idiag_mbx_acc; 1225 struct dentry *idiag_ext_acc; 1226 uint8_t lpfc_idiag_last_eq; 1227 #endif 1228 uint16_t nvmeio_trc_on; 1229 1230 /* Used for deferred freeing of ELS data buffers */ 1231 struct list_head elsbuf; 1232 int elsbuf_cnt; 1233 int elsbuf_prev_cnt; 1234 1235 uint8_t temp_sensor_support; 1236 /* Fields used for heart beat. */ 1237 unsigned long last_completion_time; 1238 unsigned long skipped_hb; 1239 struct timer_list hb_tmofunc; 1240 struct timer_list rrq_tmr; 1241 enum hba_temp_state over_temp_state; 1242 /* 1243 * Following bit will be set for all buffer tags which are not 1244 * associated with any HBQ. 
1245 */ 1246 #define QUE_BUFTAG_BIT (1<<31) 1247 uint32_t buffer_tag_count; 1248 int wait_4_mlo_maint_flg; 1249 wait_queue_head_t wait_4_mlo_m_q; 1250 /* data structure used for latency data collection */ 1251 #define LPFC_NO_BUCKET 0 1252 #define LPFC_LINEAR_BUCKET 1 1253 #define LPFC_POWER2_BUCKET 2 1254 uint8_t bucket_type; 1255 uint32_t bucket_base; 1256 uint32_t bucket_step; 1257 1258 /* Maximum number of events that can be outstanding at any time*/ 1259 #define LPFC_MAX_EVT_COUNT 512 1260 atomic_t fast_event_count; 1261 uint32_t fcoe_eventtag; 1262 uint32_t fcoe_eventtag_at_fcf_scan; 1263 uint32_t fcoe_cvl_eventtag; 1264 uint32_t fcoe_cvl_eventtag_attn; 1265 struct lpfc_fcf fcf; 1266 uint8_t fc_map[3]; 1267 uint8_t valid_vlan; 1268 uint16_t vlan_id; 1269 struct list_head fcf_conn_rec_list; 1270 1271 bool defer_flogi_acc_flag; 1272 uint16_t defer_flogi_acc_rx_id; 1273 uint16_t defer_flogi_acc_ox_id; 1274 1275 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ 1276 struct list_head ct_ev_waiters; 1277 struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX]; 1278 uint32_t ctx_idx; 1279 struct timer_list inactive_vmid_poll; 1280 1281 /* RAS Support */ 1282 struct lpfc_ras_fwlog ras_fwlog; 1283 1284 uint8_t menlo_flag; /* menlo generic flags */ 1285 #define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ 1286 uint32_t iocb_cnt; 1287 uint32_t iocb_max; 1288 atomic_t sdev_cnt; 1289 spinlock_t devicelock; /* lock for luns list */ 1290 mempool_t *device_data_mem_pool; 1291 struct list_head luns; 1292 #define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080 1293 #define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040 1294 #define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020 1295 #define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010 1296 #define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008 1297 #define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004 1298 #define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002 1299 #define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001 1300 #define LPFC_TRANSGRESSION_HIGH_RXPOWER 
0x8000 1301 #define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000 1302 uint16_t sfp_alarm; 1303 uint16_t sfp_warning; 1304 1305 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1306 uint16_t hdwqstat_on; 1307 #define LPFC_CHECK_OFF 0 1308 #define LPFC_CHECK_NVME_IO 1 1309 #define LPFC_CHECK_NVMET_IO 2 1310 #define LPFC_CHECK_SCSI_IO 4 1311 uint16_t ktime_on; 1312 uint64_t ktime_data_samples; 1313 uint64_t ktime_status_samples; 1314 uint64_t ktime_last_cmd; 1315 uint64_t ktime_seg1_total; 1316 uint64_t ktime_seg1_min; 1317 uint64_t ktime_seg1_max; 1318 uint64_t ktime_seg2_total; 1319 uint64_t ktime_seg2_min; 1320 uint64_t ktime_seg2_max; 1321 uint64_t ktime_seg3_total; 1322 uint64_t ktime_seg3_min; 1323 uint64_t ktime_seg3_max; 1324 uint64_t ktime_seg4_total; 1325 uint64_t ktime_seg4_min; 1326 uint64_t ktime_seg4_max; 1327 uint64_t ktime_seg5_total; 1328 uint64_t ktime_seg5_min; 1329 uint64_t ktime_seg5_max; 1330 uint64_t ktime_seg6_total; 1331 uint64_t ktime_seg6_min; 1332 uint64_t ktime_seg6_max; 1333 uint64_t ktime_seg7_total; 1334 uint64_t ktime_seg7_min; 1335 uint64_t ktime_seg7_max; 1336 uint64_t ktime_seg8_total; 1337 uint64_t ktime_seg8_min; 1338 uint64_t ktime_seg8_max; 1339 uint64_t ktime_seg9_total; 1340 uint64_t ktime_seg9_min; 1341 uint64_t ktime_seg9_max; 1342 uint64_t ktime_seg10_total; 1343 uint64_t ktime_seg10_min; 1344 uint64_t ktime_seg10_max; 1345 #endif 1346 1347 struct hlist_node cpuhp; /* used for cpuhp per hba callback */ 1348 struct timer_list cpuhp_poll_timer; 1349 struct list_head poll_list; /* slowpath eq polling list */ 1350 #define LPFC_POLL_HB 1 /* slowpath heartbeat */ 1351 #define LPFC_POLL_FASTPATH 0 /* called from fastpath */ 1352 #define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ 1353 1354 char os_host_name[MAXHOSTNAMELEN]; 1355 1356 /* SCSI host template information - for physical port */ 1357 struct scsi_host_template port_template; 1358 /* SCSI host template information - for all vports */ 1359 struct scsi_host_template vport_template; 1360 
	/* In-memory driver debug log ring buffer and its bookkeeping.
	 * Counters are atomics; presumably written from multiple
	 * contexts — confirm against the dbg_log producers/dumper.
	 */
	atomic_t dbg_log_idx;
	atomic_t dbg_log_cnt;
	atomic_t dbg_log_dmping;
	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
};

/**
 * lpfc_shost_from_vport - Map a vport back to its embedding Scsi_Host
 * @vport: Pointer to the lpfc_vport.
 *
 * The vport structure lives in the Scsi_Host's hostdata[] area, so the
 * enclosing Scsi_Host is recovered with container_of().
 **/
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
	return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
}

/**
 * lpfc_set_loopback_flag - Set or clear the loopback link flag
 * @phba: Pointer to HBA context object.
 *
 * Sets LS_LOOPBACK_MODE in phba->link_flag when the configured topology
 * is FLAGS_LOCAL_LB; clears it otherwise.
 **/
static inline void
lpfc_set_loopback_flag(struct lpfc_hba *phba)
{
	if (phba->cfg_topology == FLAGS_LOCAL_LB)
		phba->link_flag |= LS_LOOPBACK_MODE;
	else
		phba->link_flag &= ~LS_LOOPBACK_MODE;
}

/**
 * lpfc_is_link_up - Check whether the FC link is considered up
 * @phba: Pointer to HBA context object.
 *
 * Return: nonzero when link_state is LPFC_LINK_UP, LPFC_CLEAR_LA or
 * LPFC_HBA_READY; zero otherwise.
 **/
static inline int
lpfc_is_link_up(struct lpfc_hba *phba)
{
	return phba->link_state == LPFC_LINK_UP ||
		phba->link_state == LPFC_CLEAR_LA ||
		phba->link_state == LPFC_HBA_READY;
}

/**
 * lpfc_worker_wake_up - Flag pending work and wake the worker thread
 * @phba: Pointer to HBA context object.
 **/
static inline void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
	/* Set the lpfc data pending flag */
	set_bit(LPFC_DATA_READY, &phba->data_flags);

	/* Wake up worker thread */
	wake_up(&phba->work_waitq);
	return;
}

/**
 * lpfc_readl - Read a 32-bit HBA register, detecting device unplug
 * @addr: Memory-mapped register address.
 * @data: Where to store the value read.
 *
 * An all-ones value is treated as an error (reads from an unplugged
 * PCI device return 0xffffffff) and @data is left unmodified.
 *
 * Return: 0 on success, -EIO when the register reads as 0xffffffff.
 **/
static inline int
lpfc_readl(void __iomem *addr, uint32_t *data)
{
	uint32_t temp;
	temp = readl(addr);
	if (temp == 0xffffffff)
		return -EIO;
	*data = temp;
	return 0;
}

/**
 * lpfc_sli_read_hs - Capture host status after an error attention
 * @phba: Pointer to HBA context object.
 *
 * Saves the host status register into phba->work_hs and the two extra
 * status words from SLIM into phba->work_status[], then clears the
 * HA_ERATT host attention bit (the readl after writel flushes the
 * posted write) and marks the physical port stopped.
 *
 * Return: 0 on success, -EIO if any register read indicates unplug.
 **/
static inline int
lpfc_sli_read_hs(struct lpfc_hba *phba)
{
	/*
	 * There was a link/board error. Read the status register to retrieve
	 * the error event and process it.
	 */
	phba->sli.slistat.err_attn_event++;

	/* Save status info and check for unplug error */
	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
		lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
		lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
		return -EIO;
	}

	/* Clear chip Host Attention error bit */
	writel(HA_ERATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	phba->pport->stopped = 1;

	return 0;
}

/**
 * lpfc_phba_elsring - Return the SLI ring used for ELS commands
 * @phba: Pointer to HBA context object.
 *
 * Return: the ELS ring for the active SLI revision, or NULL when
 * sli_rev is invalid (e.g. corrupted by bad firmware) or the SLI4 ELS
 * work queue has not been set up.
 **/
static inline struct lpfc_sli_ring *
lpfc_phba_elsring(struct lpfc_hba *phba)
{
	/* Return NULL if sli_rev has become invalid due to bad fw */
	if (phba->sli_rev != LPFC_SLI_REV4 &&
	    phba->sli_rev != LPFC_SLI_REV3 &&
	    phba->sli_rev != LPFC_SLI_REV2)
		return NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (phba->sli4_hba.els_wq)
			return phba->sli4_hba.els_wq->pring;
		else
			return NULL;
	}
	return &phba->sli.sli3_ring[LPFC_ELS_RING];
}

/**
 * lpfc_next_online_cpu - Finds next online CPU on cpumask
 * @mask: Pointer to phba's cpumask member.
 * @start: starting cpu index
 *
 * The search wraps around the mask, so a cpu before @start may be
 * returned.
 *
 * Note: If no valid cpu found, then nr_cpu_ids is returned.
 *
 **/
static inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
	unsigned int cpu_it;

	for_each_cpu_wrap(cpu_it, mask, start) {
		if (cpu_online(cpu_it))
			break;
	}

	return cpu_it;
}

/**
 * lpfc_sli4_mod_hba_eq_delay - update EQ delay
 * @phba: Pointer to HBA context object.
 * @eq: The Event Queue to update.
 * @delay: The delay value (in us) to be written.
 *
 * Programs the SLI port EQ-delay register for @eq's queue id and
 * records the value in eq->q_mode.
 **/
static inline void
lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
			   u32 delay)
{
	struct lpfc_register reg_data;

	reg_data.word0 = 0;
	bf_set(lpfc_sliport_eqdelay_id, &reg_data, eq->queue_id);
	bf_set(lpfc_sliport_eqdelay_delay, &reg_data, delay);
	writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
	eq->q_mode = delay;
}


/*
 * Macro that declares tables and a routine to perform enum type to
 * ascii string lookup.
 *
 * Defines a <key,value> table for an enum. Uses xxx_INIT defines for
 * the enum to populate the table. Macro defines a routine (named
 * by caller) that will search all elements of the table for the key
 * and return the name string if found or "Unrecognized" if not found.
 */
#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init)		\
static struct {								\
	enum enum_name		value;					\
	char			*name;					\
} fc_##enum_name##_e2str_names[] = enum_init;				\
static const char *routine(enum enum_name table_key)			\
{									\
	int i;								\
	char *name = "Unrecognized";					\
									\
	for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
		if (fc_##enum_name##_e2str_names[i].value == table_key) {\
			name = fc_##enum_name##_e2str_names[i].name;	\
			break;						\
		}							\
	}								\
	return name;							\
}

/**
 * lpfc_is_vmid_enabled - returns if VMID is enabled for either switch types
 * @phba: Pointer to HBA context object.
 *
 * Relationship between the enable, target support and if vmid tag is required
 * for the particular combination
 * ---------------------------------------------------
 * Switch    Enable Flag  Target Support  VMID Needed
 * ---------------------------------------------------
 * App Id     0              NA              N
 * App Id     1               0              N
 * App Id     1               1              Y
 * Pr Tag     0              NA              N
 * Pr Tag     1               0              N
 * Pr Tag     1               1              Y
 * Pr Tag     2               *              Y
 * ---------------------------------------------------
 *
 * Return: nonzero when either cfg_vmid_app_header or
 * cfg_vmid_priority_tagging is set; zero otherwise.
 **/
static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
{
	return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
}