/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 20

#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF

struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	bool legacy_mode;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

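/*
 * struct irdma_qp - irdma private QP, embedding the core ib_qp.
 *
 * The two anonymous unions below overlay transport-specific state:
 * which member is valid depends on whether the connection is iWARP
 * (iwarp_info/tcp_info, over TCP) or RoCEv2 (roce_info/udp_info,
 * over UDP).
 */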
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR,
			      dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR,
			      dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

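/*
 * set_ib_wc_op_rq() is the receive-side counterpart of set_ib_wc_op_sq()
 * above. A minimal polling sketch (hypothetical caller; assumes info has
 * been filled in by the CQ poll path and that q_type distinguishes SQ
 * completions from RQ completions):
 *
 *	struct ib_wc wc = {};
 *
 *	if (info.q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(&info, &wc);
 *	else
 *		set_ib_wc_op_rq(&info, &wc, send_imm_supported);
 */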
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support send with immediate, so immediate data
	 * on a receive completion can only come from a write with
	 * immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
				IB_WC_RECV_RDMA_WITH_IMM :
				IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq,
			  struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */