/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
#include "osdep.h"
#include "irdma.h"
#include "user.h"
#include "hmc.h"
#include "uda.h"
#include "ws.h"

/* Per-subsystem tag strings used to label irdma debug output */
#define IRDMA_DEBUG_ERR		"ERR"
#define IRDMA_DEBUG_INIT	"INIT"
#define IRDMA_DEBUG_DEV		"DEV"
#define IRDMA_DEBUG_CM		"CM"
#define IRDMA_DEBUG_VERBS	"VERBS"
#define IRDMA_DEBUG_PUDA	"PUDA"
#define IRDMA_DEBUG_ILQ		"ILQ"
#define IRDMA_DEBUG_IEQ		"IEQ"
#define IRDMA_DEBUG_QP		"QP"
#define IRDMA_DEBUG_CQ		"CQ"
#define IRDMA_DEBUG_MR		"MR"
#define IRDMA_DEBUG_PBLE	"PBLE"
#define IRDMA_DEBUG_WQE		"WQE"
#define IRDMA_DEBUG_AEQ		"AEQ"
#define IRDMA_DEBUG_CQP		"CQP"
#define IRDMA_DEBUG_HMC		"HMC"
#define IRDMA_DEBUG_USER	"USER"
#define IRDMA_DEBUG_VIRT	"VIRT"
#define IRDMA_DEBUG_DCB		"DCB"
#define IRDMA_DEBUG_CQE		"CQE"
#define IRDMA_DEBUG_CLNT	"CLNT"
#define IRDMA_DEBUG_WS		"WS"
#define IRDMA_DEBUG_STATS	"STATS"

/* Page sizes supported for memory registration */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Flags for irdma_terminate_hdr.hdrct indicating which headers follow */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer originating a terminate message (iWARP TERM, RFC 5041) */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};

/*
 * Terminate error-type codes. RDMAP_* and DDP_* values overlap
 * numerically; they are interpreted relative to the layer above.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};

/* RDMAP-layer terminate error codes */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

/*
 * DDP-layer terminate error codes. Tagged and untagged code spaces
 * intentionally reuse values; the error type selects the space.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

/* MPA-layer terminate error codes */
enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};

/* QP async event classification (see irdma_sc_qp.event_type) */
enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
	IRDMA_QP_EVENT_REQ_ERR,
};

/*
 * Indices of the hardware statistics counters, grouped by hardware
 * generation and counter width.
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};

/* Indices into irdma_sc_dev.feature_info[] */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

/* Work-scheduler node arbitration policy */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR = 1,
	IRDMA_PRIO_STRICT = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

/* Role of the function owning a VSI: VF, VM, or PF */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource-partitioning profile selection */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL = 3,
};

/* Entry classification for the quad-hash (connection steering) table */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

/* Operation to apply to a quad-hash table entry */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

/* Policy for handling TCP SYN/RST segments (HW vs FW, secure or not) */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

/* Queue class: regular SQ/RQ pair vs control QP */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};

struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tuning parameters */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

/* Parameters for irdma_sc_cqp_init(); mirrored into struct irdma_sc_cqp */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

/* iWARP terminate message header (layer/etype, code, header-control flags) */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};

/* One CQP SQ work-queue element */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

/* Raw AEQ element */
struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

/* Raw CEQ element */
struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

/* CQP context image passed to hardware */
struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

/* CQ shadow (host-updated) area */
struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

/* Per-counter register offsets (gen1 counter set) */
struct irdma_dev_hw_stats_offsets {
	u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
};

/* Accumulated hardware statistics values */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw statistics snapshot as gathered from hardware */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Describes where one counter lives inside the gather buffer */
struct irdma_hw_stat_map {
	u16 byteoff;
	u8 bitoff;
	u64 bitmask;
};

/* State for a statistics-gather CQP operation */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;
	void *last_gather_stats_va;
};

/* Per-VSI statistics context, including periodic gather timer */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};

/* MMIO mappings and HMC info for one hardware function */
struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};

/* Partial-FPDU (MPA reassembly) state and counters for the IEQ path */
struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8 marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

/* Protection domain */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;
};

/* One quantum (WQE-sized chunk) of the CQP SQ */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};

/* Control QP: the admin queue used to post firmware commands */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

/* Asynchronous event queue */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;
	bool virtual_map:1;
};

/* Completion event queue; fans interrupt events out to registered CQs */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};

/* Completion queue (privileged/control view wrapping the user CQ) */
struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en; /* NOTE(review): full byte, unlike sibling :1 flags — confirm intended */
};

/* Queue pair (privileged/control view wrapping the user QP) */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;
};

/* Arguments for the allocate/free statistics-instance CQP op */
struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

/* User-priority map update parameters */
struct irdma_up_info {
	u8 map[8];
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Work-scheduler node description for the add/modify/delete WS node op */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;
	u8 tc;
	u8 weight;
};

/* Miscellaneous limits parsed from the HMC FPM query buffer */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

#define IRDMA_LEAF_DEFAULT_REL_BW		64
#define IRDMA_PARENT_DEFAULT_REL_BW		1

/* Per-user-priority QoS level state */
struct irdma_qos {
	struct list_head qplist;
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

#define IRDMA_INVALID_STATS_IDX 0xff

/* Virtual Station Interface: per-port RDMA context (QoS, PUDA, stats) */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_inst_alloc:1;
	bool tc_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u8 stats_idx;
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	bool dscp_mode:1;
};

/* Top-level control-path device object for one RDMA function */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
	u64 feature_info[IRDMA_MAX_FEATURES];
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};

/* Parameters for CQ modify/resize */
struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow; /* NOTE(review): full byte, unlike sibling :1 flags — confirm intended */
	bool cq_resize:1;
};

/* Parameters for the QP create CQP op */
struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb;
	u8 next_iwarp_state;
};

/* Parameters for the QP modify CQP op */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

/* Decoded CQP completion (from the CCQ) */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};

/* DCB application-priority table entry */
struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

/* Per-traffic-class QoS attributes passed in irdma_l2params */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

/* L2/DCB parameters pushed down from the LAN driver */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];
	u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};

/* Parameters for VSI initialization */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Parameters for per-VSI statistics setup */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_stats_inst;
};

/* Parameters for irdma_sc_dev initialization */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};

/* Parameters for irdma_sc_ceq_init() */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};

/* Parameters for irdma_sc_aeq_init() */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

/* Parameters for irdma_sc_ccq_init() */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};

/* UDP (RoCEv2) transport context written into the QP host context */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};

/* RoCE-specific context written into the QP host context */
struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* iWARP-specific context written into the QP host context */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* TCP connection context written into the QP host context (iWARP) */
struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 syn_rst_handling;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};

/* Aggregate inputs for building the QP host context image */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* Decoded asynchronous event queue entry */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};

/* Parameters for the allocate-STag CQP op */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	u8 hmc_fcn_index;
};

/* Parameters for the memory-window allocate CQP op */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;
	bool mw1_bind_dont_vldt_key:1;
};

/* Parameters for the non-shared MR registration CQP op */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
};

/* Parameters for a fast-register (FMR) work request */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

/* Parameters for the deallocate-STag CQP op */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;
	bool dealloc_pbl:1;
};

/* Parameters for registering a shared MR against a parent STag */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};

/* Parameters for irdma_sc_qp_init() */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

/* Parameters for irdma_sc_cq_init() */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

/* Parameters for the upload-QP-context CQP op */
struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};

/* Local MAC table entry */
struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

/* Parameters for the add-ARP-cache-entry CQP op */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

/* Parameters for the APBVT (accelerated port) add/delete CQP op */
struct irdma_apbvt_info {
	u16 port;
	bool add;
};

/* Parameters for the manage-quad-hash-table-entry CQP op */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

/* Parameters for the manage-push-page CQP op */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

/* Parameters for the QP flush-WQEs CQP op */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

/* Parameters for the generate-AE CQP op */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Progress tracking for detecting a stalled CQP */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};

/* HW-generation-specific interrupt configuration callbacks */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};

/* CCQ (CQP completion queue) control */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

/* CEQ control */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* AEQ control */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
/* CQP (control QP) lifecycle and command posting */
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_fast_register(struct irdma_sc_qp *qp,
			   struct irdma_fast_reg_stag_info *info, bool post_sq);
/* QP lifecycle, modify and flush */
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);

void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
/* CQ lifecycle */
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u8 hmc_fn_id, bool post_sq,
					bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * Per-opcode argument bundles for CQP commands; the active union member
 * is selected by the opcode stored alongside this structure.
 * (Declaration continues beyond this view.)
 */
struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info
info; 1417 u64 scratch; 1418 } ah_destroy; 1419 1420 struct { 1421 struct irdma_sc_cqp *cqp; 1422 struct irdma_mcast_grp_info info; 1423 u64 scratch; 1424 } mc_create; 1425 1426 struct { 1427 struct irdma_sc_cqp *cqp; 1428 struct irdma_mcast_grp_info info; 1429 u64 scratch; 1430 } mc_destroy; 1431 1432 struct { 1433 struct irdma_sc_cqp *cqp; 1434 struct irdma_mcast_grp_info info; 1435 u64 scratch; 1436 } mc_modify; 1437 1438 struct { 1439 struct irdma_sc_cqp *cqp; 1440 struct irdma_stats_inst_info info; 1441 u64 scratch; 1442 } stats_manage; 1443 1444 struct { 1445 struct irdma_sc_cqp *cqp; 1446 struct irdma_stats_gather_info info; 1447 u64 scratch; 1448 } stats_gather; 1449 1450 struct { 1451 struct irdma_sc_cqp *cqp; 1452 struct irdma_ws_node_info info; 1453 u64 scratch; 1454 } ws_node; 1455 1456 struct { 1457 struct irdma_sc_cqp *cqp; 1458 struct irdma_up_info info; 1459 u64 scratch; 1460 } up_map; 1461 1462 struct { 1463 struct irdma_sc_cqp *cqp; 1464 struct irdma_dma_mem query_buff_mem; 1465 u64 scratch; 1466 } query_rdma; 1467 } u; 1468 }; 1469 1470 struct cqp_cmds_info { 1471 struct list_head cqp_cmd_entry; 1472 u8 cqp_cmd; 1473 u8 post_sq; 1474 struct cqp_info in; 1475 }; 1476 1477 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, 1478 u32 *wqe_idx); 1479 1480 /** 1481 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq 1482 * @cqp: struct for cqp hw 1483 * @scratch: private data for CQP WQE 1484 */ 1485 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch) 1486 { 1487 u32 wqe_idx; 1488 1489 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); 1490 } 1491 #endif /* IRDMA_TYPE_H */ 1492