/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	    0 if psn_a == psn_b
 *	   <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
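
/* Note: a BTH PSN is a 24-bit value carried in the low bits of a u32, so the
 * unsigned subtraction in psn_compare() wraps modulo 2^24. Shifting the
 * difference left by 8 moves that 24-bit result into the top bits of a signed
 * 32-bit value, so the sign of the result stays correct across PSN
 * wrap-around (for example, psn_compare(0x000002, 0xfffffd) is positive).
 */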

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_entry pelem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_entry pelem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_entry pelem;
	struct rxe_pd *pd;
	struct rxe_av av;
};

struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_entry pelem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_dying;
	int is_user;
	struct tasklet_struct comp_task;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_entry pelem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

struct rxe_req_info {
	enum rxe_qp_state state;
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			struct sk_buff *skb;
		} atomic;
		struct {
			struct rxe_mr *mr;
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state state;
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	struct rxe_mr *mr;
	u32 resid;
	u32 rkey;
	u32 length;
	u64 atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};

struct rxe_qp {
	struct rxe_pool_entry pelem;
	struct ib_qp ibqp;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	int is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head grp_list;
	spinlock_t grp_lock; /* guard grp_list */

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;
	struct sk_buff_head send_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packet when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKS. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

enum rxe_mr_state {
	RXE_MR_STATE_ZOMBIE,
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_type {
	RXE_MR_TYPE_NONE,
	RXE_MR_TYPE_DMA,
	RXE_MR_TYPE_MR,
	RXE_MR_TYPE_MW,
};

#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64 addr;
	u64 size;
};

struct rxe_map {
	struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};

struct rxe_mr {
	struct rxe_pool_entry pelem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	enum rxe_mr_state state;
	enum rxe_mr_type type;
	u64 va;
	u64 iova;
	size_t length;
	u32 offset;
	int access;

	int page_shift;
	int page_mask;
	int map_shift;
	int map_mask;

	u32 num_buf;
	u32 nbuf;

	u32 max_buf;
	u32 num_map;

	struct rxe_map **map;
};

enum rxe_mw_state {
	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_entry pelem;
};

struct rxe_mc_grp {
	struct rxe_pool_entry pelem;
	spinlock_t mcg_lock; /* guard group */
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	int num_qp;
	u32 qkey;
	u16 pkey;
};

struct rxe_mc_elem {
	struct rxe_pool_entry pelem;
	struct list_head qp_list;
	struct list_head grp_list;
	struct rxe_qp *qp;
	struct rxe_mc_grp *grp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_smi_index;
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	int xmit_errors;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;
	struct rxe_pool mc_grp_pool;
	struct rxe_pool mc_elem_pool;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline u32 mr_lkey(struct rxe_mr *mr)
{
	return mr->ibmr.lkey;
}

static inline u32 mr_rkey(struct rxe_mr *mr)
{
	return mr->ibmr.rkey;
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

#endif /* RXE_VERBS_H */
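
/*
 * Usage sketch for the container_of() wrappers above ("some_verb" is a
 * placeholder name, not a function in this driver): a verbs entry point
 * typically recovers the rxe-specific objects from the core ib_* pointers
 * it is handed, e.g.:
 *
 *	static int some_verb(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 *			     int mask, struct ib_udata *udata)
 *	{
 *		struct rxe_dev *rxe = to_rdev(ibqp->device);
 *		struct rxe_qp *qp = to_rqp(ibqp);
 *		...
 *	}
 */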