/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

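/* Per the IBA partition key rules: two pkeys match when their low
 * 15 bits are equal and non-zero and at least one of the two has the
 * full-membership bit (bit 15) set.
 */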
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 * PSNs are 24 bits wide, so shifting the difference up by 8 bits puts
 * it in the top of an s32 and keeps the sign correct across PSN
 * wraparound.
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
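
/* Illustrative only (not part of this header): a caller can test
 * whether a received PSN is still behind the expected one with
 *
 *	if (psn_compare(pkt_psn, expected_psn) < 0)
 *		... handle duplicate or out-of-order packet ...
 */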

struct rxe_ucontext {
	struct ib_ucontext	ibuc;
	struct rxe_pool_elem	elem;
};

struct rxe_pd {
	struct ib_pd		ibpd;
	struct rxe_pool_elem	elem;
};

struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_elem	elem;
	struct rxe_av		av;
	bool			is_user;
	int			ah_num;
};

struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};

struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_elem	elem;
	struct rxe_queue	*queue;
	spinlock_t		cq_lock;
	u8			notify;
	bool			is_dying;
	bool			is_user;
	struct tasklet_struct	comp_task;
	atomic_t		num_wq;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int			max_wr;
	int			max_sge;
	int			max_inline;
	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;
};

struct rxe_rq {
	int			max_wr;
	int			max_sge;
	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;
};

struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_elem	elem;
	struct rxe_pd		*pd;
	struct rxe_rq		rq;
	u32			srq_num;

	int			limit;
	int			error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

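/* State private to the requester side of a QP. */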
struct rxe_req_info {
	enum rxe_qp_state	state;
	int			wqe_index;
	u32			psn;
	int			opcode;
	atomic_t		rd_atomic;
	int			wait_fence;
	int			need_rd_atomic;
	int			wait_psn;
	int			need_retry;
	int			noack_pkts;
	struct rxe_task		task;
};

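/* State private to the completer side of a QP. */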
struct rxe_comp_info {
	u32			psn;
	int			opcode;
	int			timeout;
	int			timeout_retry;
	int			started_retry;
	u32			retry_cnt;
	u32			rnr_retry;
	struct rxe_task		task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

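/* A saved responder resource, used to replay the response to a
 * duplicate RDMA read or atomic request.
 */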
struct resp_res {
	int			type;
	int			replay;
	u32			first_psn;
	u32			last_psn;
	u32			cur_psn;
	enum rdatm_res_state	state;

	union {
		struct {
			struct sk_buff	*skb;
		} atomic;
		struct {
			u64		va_org;
			u32		rkey;
			u32		length;
			u64		va;
			u32		resid;
		} read;
	};
};

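/* State private to the responder side of a QP. */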
struct rxe_resp_info {
	enum rxe_qp_state	state;
	u32			msn;
	u32			psn;
	u32			ack_psn;
	int			opcode;
	int			drop_msg;
	int			goto_error;
	int			sent_psn_nak;
	enum ib_wc_status	status;
	u8			aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe	*wqe;

	/* RDMA read / atomic only */
	u64			va;
	u64			offset;
	struct rxe_mr		*mr;
	u32			resid;
	u32			rkey;
	u32			length;
	u64			atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe	wqe;
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;
};

struct rxe_qp {
	struct ib_qp		ibqp;
	struct rxe_pool_elem	elem;
	struct ib_qp_attr	attr;
	unsigned int		valid;
	unsigned int		mtu;
	bool			is_user;

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;
	struct rxe_cq		*scq;
	struct rxe_cq		*rcq;

	enum ib_sig_type	sq_sig_type;

	struct rxe_sq		sq;
	struct rxe_rq		rq;

	struct socket		*sk;
	u32			dst_cookie;
	u16			src_port;

	struct rxe_av		pri_av;
	struct rxe_av		alt_av;

	atomic_t		mcg_num;

	struct sk_buff_head	req_pkts;
	struct sk_buff_head	resp_pkts;

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;

	atomic_t		ssn;
	atomic_t		skb_out;
	int			need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list	retrans_timer;
	u64			qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list	rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;
};

enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

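/* Two-level description of the memory behind an MR: each rxe_map holds
 * as many physical buffer descriptors as fit in one page, and a map set
 * keeps an array of pointers to those maps.
 */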
#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64			addr;
	u64			size;
};

struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};

struct rxe_map_set {
	struct rxe_map		**map;
	u64			va;
	u64			iova;
	size_t			length;
	u32			offset;
	u32			nbuf;
	int			page_shift;
	int			page_mask;
};

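/* The lower 8 bits of an rkey are its variable key byte; the upper 24
 * bits are the owning object's pool index. MW indices occupy a range
 * disjoint from MR indices, which is what this test depends on.
 */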
static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

struct rxe_mr {
	struct rxe_pool_elem	elem;
	struct ib_mr		ibmr;

	struct ib_umem		*umem;

	u32			lkey;
	u32			rkey;
	enum rxe_mr_state	state;
	enum ib_mr_type		type;
	int			access;

	int			map_shift;
	int			map_mask;

	u32			num_buf;

	u32			max_buf;
	u32			num_map;

	atomic_t		num_mw;

	struct rxe_map_set	*cur_map_set;
	struct rxe_map_set	*next_map_set;
};

enum rxe_mw_state {
	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw		ibmw;
	struct rxe_pool_elem	elem;
	spinlock_t		lock;
	enum rxe_mw_state	state;
	struct rxe_qp		*qp; /* Type 2 only */
	struct rxe_mr		*mr;
	u32			rkey;
	int			access;
	u64			addr;
	u64			length;
};

struct rxe_mcg {
	struct rb_node		node;
	struct kref		ref_cnt;
	struct rxe_dev		*rxe;
	struct list_head	qp_list;
	union ib_gid		mgid;
	atomic_t		qp_num;
	u32			qkey;
	u16			pkey;
};

struct rxe_mca {
	struct list_head	qp_list;
	struct rxe_qp		*qp;
};

struct rxe_port {
	struct ib_port_attr	attr;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;
	/* special QPs */
	u32			qp_gsi_index;
};

struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	int			max_ucontext;
	int			max_inline_data;
	struct mutex		usdev_lock;

	struct net_device	*ndev;

	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;

	/* multicast support */
	spinlock_t		mcg_lock;
	struct rb_root		mcg_tree;
	atomic_t		mcg_num;
	atomic_t		mcg_attach;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
	u64			mmap_offset;

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct crypto_shash	*tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

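/* Converters from uverbs core objects to their rxe counterparts. Each
 * one tolerates a NULL pointer and returns NULL in that case.
 */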
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */