/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
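
/* Note on psn_compare(): PSNs are 24-bit values, so the raw 32-bit
 * subtraction is shifted left by 8 before being read as a signed quantity.
 * This makes the comparison wrap correctly across the 24-bit boundary,
 * following the usual serial-number arithmetic rule, e.g.:
 *
 *	psn_compare(0x000001, 0xffffff) > 0	(1 is one step ahead of 0xffffff)
 *	psn_compare(0xffffff, 0x000001) < 0
 */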

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_elem elem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_elem elem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_elem elem;
	struct rxe_av av;
	bool is_user;
	int ah_num;
};

struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_elem elem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_dying;
	bool is_user;
	struct tasklet_struct comp_task;
	atomic_t num_wq;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_elem elem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

struct rxe_req_info {
	enum rxe_qp_state state;
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int wait_for_rnr_timer;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	enum rxe_qp_state state;
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			u64 orig_val;
		} atomic;
		struct {
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
		struct {
			u32 length;
			u64 va;
			u8 type;
			u8 level;
		} flush;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state state;
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	u64 offset;
	struct rxe_mr *mr;
	u32 resid;
	u32 rkey;
	u32 length;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};

struct rxe_qp {
	struct ib_qp ibqp;
	struct rxe_pool_elem elem;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	bool is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	atomic_t mcg_num;

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}
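
/* Key layout note: rxe keeps the pool index of the MR or MW in the upper
 * 24 bits of an lkey/rkey, with a consumer-visible key portion in the low
 * byte, so "rkey >> 8" recovers the index.  An rkey whose index falls inside
 * [RXE_MIN_MW_INDEX, RXE_MAX_MW_INDEX] therefore refers to a memory window
 * rather than a memory region.
 */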

struct rxe_mr {
	struct rxe_pool_elem elem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	u32 lkey;
	u32 rkey;
	enum rxe_mr_state state;
	int access;
	atomic_t num_mw;

	unsigned int page_offset;
	unsigned int page_shift;
	u64 page_mask;

	u32 num_buf;
	u32 nbuf;

	struct xarray page_list;
};

static inline unsigned int mr_page_size(struct rxe_mr *mr)
{
	return mr ? mr->ibmr.page_size : PAGE_SIZE;
}
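
/* Sketch of how the fields above cooperate (simplified, for illustration
 * only): page_list holds one entry per MR page, so resolving an iova
 * conceptually amounts to
 *
 *	index  = (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
 *	offset = iova & (mr_page_size(mr) - 1);
 *	page   = xa_load(&mr->page_list, index);
 *
 * num_buf is the capacity of the page map and nbuf the number of pages
 * actually populated.
 */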

enum rxe_mw_state {
	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_elem elem;
	spinlock_t lock;
	enum rxe_mw_state state;
	struct rxe_qp *qp; /* Type 2 only */
	struct rxe_mr *mr;
	u32 rkey;
	int access;
	u64 addr;
	u64 length;
};

struct rxe_mcg {
	struct rb_node node;
	struct kref ref_cnt;
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	atomic_t qp_num;
	u32 qkey;
	u16 pkey;
};

struct rxe_mca {
	struct list_head qp_list;
	struct rxe_qp *qp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;

	/* multicast support */
	spinlock_t mcg_lock;
	struct rb_root mcg_tree;
	atomic_t mcg_num;
	atomic_t mcg_attach;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */