/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
#endif

/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
struct lpfc_vport;

/* Define the context types that SLI handles for abort and sums. */
typedef enum _lpfc_ctx_cmd {
	LPFC_CTX_LUN,
	LPFC_CTX_TGT,
	LPFC_CTX_HOST
} lpfc_ctx_cmd;

union lpfc_vmid_tag {
	uint32_t app_id;
	uint8_t cs_ctl_vmid;
	struct lpfc_vmid_context *vmid_context;	/* UVEM context information */
};

struct lpfc_cq_event {
	struct list_head list;
	uint16_t hdwq;
	union {
		struct lpfc_mcqe		mcqe_cmpl;
		struct lpfc_acqe_link		acqe_link;
		struct lpfc_acqe_fip		acqe_fip;
		struct lpfc_acqe_dcbx		acqe_dcbx;
		struct lpfc_acqe_grp5		acqe_grp5;
		struct lpfc_acqe_fc_la		acqe_fc;
		struct lpfc_acqe_sli		acqe_sli;
		struct lpfc_rcqe		rcqe_cmpl;
		struct sli4_wcqe_xri_aborted	wcqe_axri;
		struct lpfc_wcqe_complete	wcqe_cmpl;
	} cqe;
};
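
/*
 * Illustrative sketch, not part of the driver: an asynchronous or slow-path
 * completion is typically deferred by copying the raw CQE payload into the
 * matching member of the cqe union above and queueing the lpfc_cq_event for
 * later processing.  The allocation style and the "cqe_entry" pointer below
 * are assumptions made only for this example.
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = kzalloc(sizeof(*cq_event), GFP_ATOMIC);
 *	if (cq_event) {
 *		INIT_LIST_HEAD(&cq_event->list);
 *		memcpy(&cq_event->cqe.acqe_link, cqe_entry,
 *		       sizeof(struct lpfc_acqe_link));
 *	}
 */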

/* This structure is used to handle IOCB requests / responses */
struct lpfc_iocbq {
	/* lpfc_iocbqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	struct list_head dlist;
	uint16_t iotag;		/* pre-assigned IO tag */
	uint16_t sli4_lxritag;	/* logical pre-assigned XRI. */
	uint16_t sli4_xritag;	/* pre-assigned XRI, (OXID) tag. */
	uint16_t hba_wqidx;	/* index to HBA work queue */
	struct lpfc_cq_event cq_event;
	uint64_t isr_timestamp;

	union lpfc_wqe128 wqe;	/* SLI-4 */
	IOCB_t iocb;		/* SLI-3 */
	struct lpfc_wcqe_complete wcqe_cmpl;	/* WQE cmpl */

	u32 unsol_rcv_len;	/* Receive len in unsol path */

	uint8_t num_bdes;
	uint8_t abort_bls;	/* ABTS by initiator or responder */
	u8 abort_rctl;		/* ACC or RJT flag */
	uint8_t priority;	/* OAS priority */
	uint8_t retry;		/* retry counter for IOCB cmd - if needed */

	u32 cmd_flag;
#define LPFC_IO_LIBDFC		1	/* libdfc iocb */
#define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
#define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
#define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE	0x20	/* Defer free'ing of FC data */
#define LPFC_EXCHANGE_BUSY	0x40	/* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX	0x80	/* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP		0x100	/* security IO */
#define LPFC_IO_ON_TXCMPLQ	0x200	/* The IO is still on the TXCMPLQ */
#define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
#define LPFC_IO_CMD_OUTSTANDING	0x2000	/* timeout handler abort window */

#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT	14

#define LPFC_IO_OAS		0x10000	/* OAS FCP IO */
#define LPFC_IO_FOF		0x20000	/* FOF FCP IO */
#define LPFC_IO_LOOPBACK	0x40000	/* Loopback IO */
#define LPFC_PRLI_NVME_REQ	0x80000	/* This is an NVME PRLI. */
#define LPFC_PRLI_FCP_REQ	0x100000 /* This is an FCP PRLI. */
#define LPFC_IO_NVME		0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS		0x400000 /* NVME LS command */
#define LPFC_IO_NVMET		0x800000 /* NVMET command */
#define LPFC_IO_VMID		0x1000000 /* VMID tagged IO */
#define LPFC_IO_CMF		0x4000000 /* CMF command */

	uint32_t drvrTimeout;	/* driver timeout in seconds */
	struct lpfc_vport *vport; /* virtual port pointer */
	void *context1;		/* caller context information */
	void *context2;		/* caller context information */
	void *context3;		/* caller context information */
	uint32_t event_tag;	/* LA Event tag */
	union {
		wait_queue_head_t *wait_queue;
		struct lpfc_iocbq *rsp_iocb;
		struct lpfcMboxq *mbox;
		struct lpfc_nodelist *ndlp;
		struct lpfc_node_rrq *rrq;
	} context_un;

	union lpfc_vmid_tag vmid_tag;
	void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
				struct lpfc_iocbq *rsp);
	void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
			      struct lpfc_iocbq *rsp);
	void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
			 struct lpfc_iocbq *rsp);
};
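
/*
 * Illustrative sketch, not part of the driver: cmd_flag is a bit mask, so
 * individual I/O attributes are set and tested with ordinary bit operations,
 * while the FIP ELS_ID occupies a two-bit field extracted with the mask and
 * shift defined above.  "piocbq" and "els_id" are hypothetical local names
 * used only for this example.
 *
 *	struct lpfc_iocbq *piocbq;
 *	u32 els_id;
 *
 *	piocbq->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
 *	if (piocbq->cmd_flag & LPFC_DRIVER_ABORTED)
 *		return;
 *	els_id = (piocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK) >>
 *		  LPFC_FIP_ELS_ID_SHIFT;
 */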

#define SLI_IOCB_RET_IOCB	1	/* Return IOCB if cmd ring full */

#define IOCB_SUCCESS		0
#define IOCB_BUSY		1
#define IOCB_ERROR		2
#define IOCB_TIMEDOUT		3
#define IOCB_ABORTED		4
#define IOCB_ABORTING		5
#define IOCB_NORESOURCE		6

#define SLI_WQE_RET_WQE		1	/* Return WQE if cmd ring full */

#define WQE_SUCCESS		0
#define WQE_BUSY		1
#define WQE_ERROR		2
#define WQE_TIMEDOUT		3
#define WQE_ABORTED		4
#define WQE_ABORTING		5
#define WQE_NORESOURCE		6

#define LPFC_MBX_WAKE		1
#define LPFC_MBX_IMED_UNREG	2

typedef struct lpfcMboxq {
	/* MBOXQs are used in single linked lists */
	struct list_head list;	/* ptr to next mailbox command */
	union {
		MAILBOX_t mb;		/* Mailbox cmd */
		struct lpfc_mqe mqe;
	} u;
	struct lpfc_vport *vport; /* virtual port pointer */
	void *ctx_ndlp;		/* caller ndlp information */
	void *ctx_buf;		/* caller buffer information */
	void *context3;

	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
	uint8_t mbox_flag;
	uint16_t in_ext_byte_len;
	uint16_t out_ext_byte_len;
	uint8_t mbox_offset_word;
	struct lpfc_mcqe mcqe;
	struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;

#define MBX_POLL	1	/* poll mailbox till command done, then
				   return */
#define MBX_NOWAIT	2	/* issue command then return immediately */
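
/*
 * Illustrative sketch, not part of this header: a mailbox command is built
 * into an LPFC_MBOXQ_t and then issued either synchronously (MBX_POLL) or
 * asynchronously (MBX_NOWAIT) with a completion callback.  The issuing
 * routine and the mailbox mempool are declared elsewhere in the driver;
 * "my_mbox_cmpl" is a hypothetical completion handler written only for this
 * example.
 *
 *	static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		// act on pmb->u.mb.mbxStatus here, then release the mailbox
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 *
 *	pmb->vport = vport;
 *	pmb->mbox_cmpl = my_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */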

#define LPFC_MAX_RING_MASK	5	/* max num of rctl/type masks allowed
					   per ring */
#define LPFC_SLI3_MAX_RING	4	/* Max num of SLI3 rings used by driver.
					   For SLI4, an additional ring for
					   each FCP WQ will be allocated. */

struct lpfc_sli_ring;

struct lpfc_sli_ring_mask {
	uint8_t profile;	/* profile associated with ring */
	uint8_t rctl;		/* rctl / type pair configured for ring */
	uint8_t type;		/* rctl / type pair configured for ring */
	uint8_t rsvd;
	/* rcv'd unsol event */
	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
					  struct lpfc_sli_ring *,
					  struct lpfc_iocbq *);
};


/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_ring_stat {
	uint64_t iocb_event;		/* IOCB event counters */
	uint64_t iocb_cmd;		/* IOCB cmd issued */
	uint64_t iocb_rsp;		/* IOCB rsp received */
	uint64_t iocb_cmd_delay;	/* IOCB cmd ring delay */
	uint64_t iocb_cmd_full;		/* IOCB cmd ring full */
	uint64_t iocb_cmd_empty;	/* IOCB cmd ring is now empty */
	uint64_t iocb_rsp_full;		/* IOCB rsp ring full */
};

struct lpfc_sli3_ring {
	uint32_t local_getidx;	/* last available cmd index (from cmdGetInx) */
	uint32_t next_cmdidx;	/* next_cmd index */
	uint32_t rspidx;	/* current index in response ring */
	uint32_t cmdidx;	/* current index in command ring */
	uint16_t numCiocb;	/* number of command iocb's per ring */
	uint16_t numRiocb;	/* number of rsp iocb's per ring */
	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
	uint32_t *rspringaddr;	/* virtual address for rsp rings */
};

struct lpfc_sli4_ring {
	struct lpfc_queue *wqp;	/* Pointer to associated WQ */
};


/* Structure used to hold SLI ring information */
struct lpfc_sli_ring {
	uint16_t flag;		/* ring flags */
#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
#define LPFC_STOP_IOCB_EVENT	 0x020	/* Stop processing IOCB cmds event */
	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */

	uint8_t rsvd;
	uint8_t ringno;		/* ring number */

	spinlock_t ring_lock;	/* lock for issuing commands */

	uint32_t fast_iotag;	/* max fastlookup based iotag */
	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
	uint32_t iotag_max;	/* max iotag value to use */
	struct list_head txq;
	uint16_t txq_cnt;	/* current length of queue */
	uint16_t txq_max;	/* max length */
	struct list_head txcmplq;
	uint16_t txcmplq_cnt;	/* current length of queue */
	uint16_t txcmplq_max;	/* max length */
	uint32_t missbufcnt;	/* keep track of buffers to post */
	struct list_head postbufq;
	uint16_t postbufq_cnt;	/* current length of queue */
	uint16_t postbufq_max;	/* max length */
	struct list_head iocb_continueq;
	uint16_t iocb_continueq_cnt;	/* current length of queue */
	uint16_t iocb_continueq_max;	/* max length */
	struct list_head iocb_continue_saveq;

	struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
	uint32_t num_mask;	/* number of mask entries in prt array */
	void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
					   struct lpfc_sli_ring *,
					   struct lpfc_iocbq *);

	struct lpfc_sli_ring_stat stats;	/* SLI statistical info */

	/* cmd ring available */
	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
					struct lpfc_sli_ring *);
	union {
		struct lpfc_sli3_ring sli3;
		struct lpfc_sli4_ring sli4;
	} sli;
};
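
/*
 * Illustrative sketch, not part of the driver: commands that cannot be
 * submitted immediately are parked on the ring's txq, and the list is walked
 * under ring_lock.  "pring" and the iterator variables are hypothetical local
 * names used only for this example.
 *
 *	struct lpfc_sli_ring *pring;
 *	struct lpfc_iocbq *piocb, *next_iocb;
 *	unsigned long iflag;
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflag);
 *	list_for_each_entry_safe(piocb, next_iocb, &pring->txq, list) {
 *		list_del_init(&piocb->list);
 *		pring->txq_cnt--;
 *	}
 *	spin_unlock_irqrestore(&pring->ring_lock, iflag);
 */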

/* Structure used for configuring rings to a specific profile or rctl / type */
struct lpfc_hbq_init {
	uint32_t rn;		/* Receive buffer notification */
	uint32_t entry_count;	/* max # of entries in HBQ */
	uint32_t headerLen;	/* 0 if not profile 4 or 5 */
	uint32_t logEntry;	/* Set to 1 if this HBQ used for LogEntry */
	uint32_t profile;	/* Selection profile 0=all, 7=logentry */
	uint32_t ring_mask;	/* Binds HBQ to a ring e.g. Ring0=b0001,
				 * ring2=b0100 */
	uint32_t hbq_index;	/* index of this hbq in ring .HBQs[] */

	uint32_t seqlenoff;
	uint32_t maxlen;
	uint32_t seqlenbcnt;
	uint32_t cmdcodeoff;
	uint32_t cmdmatch[8];
	uint32_t mask_count;	/* number of mask entries in prt array */
	struct hbq_mask hbqMasks[6];

	/* Non-config rings fields to keep track of buffer allocations */
	uint32_t buffer_count;	/* number of buffers allocated */
	uint32_t init_count;	/* number to allocate when initialized */
	uint32_t add_count;	/* number to allocate when starved */
};

/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_stat {
	uint64_t mbox_stat_err;	/* Mbox cmds completed status error */
	uint64_t mbox_cmd;	/* Mailbox commands issued */
	uint64_t sli_intr;	/* Count of Host Attention interrupts */
	uint64_t sli_prev_intr;	/* Previous cnt of Host Attention interrupts */
	uint64_t sli_ips;	/* Host Attention interrupts per sec */
	uint32_t err_attn_event; /* Error Attn event counters */
	uint32_t link_event;	/* Link event counters */
	uint32_t mbox_event;	/* Mailbox event counters */
	uint32_t mbox_busy;	/* Mailbox cmd busy */
};

/* Structure to store link status values when port stats are reset */
struct lpfc_lnk_stat {
	uint32_t link_failure_count;
	uint32_t loss_of_sync_count;
	uint32_t loss_of_signal_count;
	uint32_t prim_seq_protocol_err_count;
	uint32_t invalid_tx_word_count;
	uint32_t invalid_crc_count;
	uint32_t error_frames;
	uint32_t link_events;
};

/* Structure used to hold SLI information */
struct lpfc_sli {
	uint32_t num_rings;
	uint32_t sli_flag;

	/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE	0x100	/* HBA mailbox is currently active */
#define LPFC_SLI_ACTIVE		0x200	/* SLI in firmware is active */
#define LPFC_PROCESS_LA		0x400	/* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO	0x800	/* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT	0x1000	/* needed for Menlo fw download */
#define LPFC_SLI_ASYNC_MBX_BLK	0x2000	/* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP	0x4000	/* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR	0x8000	/* EQ Delay Register is supported */
#define LPFC_QUEUE_FREE_INIT	0x10000	/* Queue freeing is in progress */
#define LPFC_QUEUE_FREE_WAIT	0x20000	/* Hold Queue free as it is being
					 * used outside worker thread
					 */

	struct lpfc_sli_ring *sli3_ring;

	struct lpfc_sli_stat slistat;	/* SLI statistical info */
	struct list_head mboxq;
	uint16_t mboxq_cnt;	/* current length of queue */
	uint16_t mboxq_max;	/* max length */
	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */
	struct list_head mboxq_cmpl;

	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
					   cmd */

#define LPFC_IOCBQ_LOOKUP_INCREMENT  1024
	struct lpfc_iocbq **iocbq_lookup; /* array to lookup IOCB by IOTAG */
	size_t iocbq_lookup_len;	/* current length of the array */
	uint16_t last_iotag;		/* last allocated IOTAG */
	time64_t stats_start;		/* in seconds */
	struct lpfc_lnk_stat lnk_stat_offsets;
};
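
/*
 * Illustrative sketch, not part of the driver: an outstanding command is
 * normally recovered from its IOTAG through the iocbq_lookup array, after a
 * range check against last_iotag.  "phba" and "iotag" are hypothetical names
 * used only for this example.
 *
 *	struct lpfc_sli *psli = &phba->sli;
 *	struct lpfc_iocbq *cmdiocb = NULL;
 *
 *	if (iotag != 0 && iotag <= psli->last_iotag)
 *		cmdiocb = psli->iocbq_lookup[iotag];
 */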

/* Timeout for normal outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO				30
/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_TMO		60
/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
/* Timeout for other flash-based outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO_FLASH_CMD			300

struct lpfc_io_buf {
	/* Common fields */
	struct list_head list;
	void *data;

	dma_addr_t dma_handle;
	dma_addr_t dma_phys_sgl;

	struct sli4_sge *dma_sgl;	/* initial segment chunk */

	/* linked list of extra sli4_hybrid_sge */
	struct list_head dma_sgl_xtra_list;

	/* list head for fcp_cmd_rsp buf */
	struct list_head dma_cmd_rsp_list;

	struct lpfc_iocbq cur_iocbq;
	struct lpfc_sli4_hdw_queue *hdwq;
	uint16_t hdwq_no;
	uint16_t cpu;

	struct lpfc_nodelist *ndlp;
	uint32_t timeout;
	uint16_t flags;
#define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
	/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF	0x4	/* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF	0x8	/* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED	0x10	/* SGL failed post to FW. */
	uint16_t status;	/* From IOCB Word 7 - ulpStatus */
	uint32_t result;	/* From IOCB Word 4. */

	uint32_t seg_cnt;	/* Number of scatter-gather segments returned by
				 * dma_map_sg.  The driver needs this for calls
				 * to dma_unmap_sg.
				 */
	unsigned long start_time;
	spinlock_t buf_lock;	/* lock used in case of simultaneous abort */
	bool expedite;		/* this is an expedite io_buf */

	union {
		/* SCSI specific fields */
		struct {
			struct scsi_cmnd *pCmd;
			struct lpfc_rport_data *rdata;
			uint32_t prot_seg_cnt;	/* seg_cnt's counterpart for
						 * protection data
						 */

			/*
			 * data and dma_handle are the kernel virtual and bus
			 * address of the dma-able buffer containing the
			 * fcp_cmd, fcp_rsp and a scatter gather bde list that
			 * supports the sg_tablesize value.
			 */
			struct fcp_cmnd *fcp_cmnd;
			struct fcp_rsp *fcp_rsp;

			wait_queue_head_t *waitq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			/* Used to restore any changes to protection data for
			 * error injection
			 */
			void *prot_data_segment;
			uint32_t prot_data;
			uint32_t prot_data_type;
#define LPFC_INJERR_REFTAG	1
#define LPFC_INJERR_APPTAG	2
#define LPFC_INJERR_GUARD	3
#endif
		};

		/* NVME specific fields */
		struct {
			struct nvmefc_fcp_req *nvmeCmd;
			uint16_t qidx;
		};
	};
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_cmd_start;
	uint64_t ts_last_cmd;
	uint64_t ts_cmd_wqput;
	uint64_t ts_isr_cmpl;
	uint64_t ts_data_io;
#endif
	uint64_t rx_cmd_start;
};
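
/*
 * Illustrative sketch, not part of the driver: because the protocol specific
 * members of struct lpfc_io_buf live in an anonymous union, the embedded
 * cur_iocbq.cmd_flag is what tells a consumer whether the SCSI or the NVME
 * view of the buffer is currently valid.  "lpfc_cmd", "handle_nvme" and
 * "handle_scsi" are hypothetical names used only for this example.
 *
 *	struct lpfc_io_buf *lpfc_cmd;
 *
 *	if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_NVME)
 *		handle_nvme(lpfc_cmd->nvmeCmd, lpfc_cmd->qidx);
 *	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_FCP)
 *		handle_scsi(lpfc_cmd->pCmd, lpfc_cmd->rdata);
 */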