/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}
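
/* Bit 15 of a pkey is the membership bit (1 = full member); the low 15
 * bits are the key itself.  For example, pkey_match(0xffff, 0x7fff)
 * returns 1 (same key, at least one full member), while
 * pkey_match(0x7fff, 0x7fff) returns 0 because both partners are
 * limited members.
 */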

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
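
/* PSNs are 24 bits wide, so the 8-bit left shift moves the (mod 2^24)
 * difference into the sign bit of the s32, making the comparison safe
 * across wraparound: e.g. psn_compare(0, 0xffffff) is positive (0 is
 * one step past 0xffffff) while psn_compare(5, 10) is negative.
 */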

struct rxe_ucontext {
	struct ib_ucontext	ibuc;
	struct rxe_pool_entry	pelem;
};

struct rxe_pd {
	struct ib_pd		ibpd;
	struct rxe_pool_entry	pelem;
};

struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_av		av;
};

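/* A CQE is built in place in the completion queue in whichever format
 * the consumer expects: ib_wc for kernel clients, ib_uverbs_wc for
 * queues mapped into user space.
 */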
struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};

struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_entry	pelem;
	struct rxe_queue	*queue;
	spinlock_t		cq_lock;
	u8			notify;
	bool			is_dying;
	bool			is_user;
	struct tasklet_struct	comp_task;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	bool			is_user;
	int			max_wr;
	int			max_sge;
	int			max_inline;
	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;
};

struct rxe_rq {
	bool			is_user;
	int			max_wr;
	int			max_sge;
	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;
};

struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_rq		rq;
	u32			srq_num;
	bool			is_user;

	int			limit;
	int			error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

struct rxe_req_info {
	enum rxe_qp_state	state;
	int			wqe_index;
	u32			psn;
	int			opcode;
	atomic_t		rd_atomic;
	int			wait_fence;
	int			need_rd_atomic;
	int			wait_psn;
	int			need_retry;
	int			noack_pkts;
	struct rxe_task		task;
};

struct rxe_comp_info {
	u32			psn;
	int			opcode;
	int			timeout;
	int			timeout_retry;
	int			started_retry;
	u32			retry_cnt;
	u32			rnr_retry;
	struct rxe_task		task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int			type;
	int			replay;
	u32			first_psn;
	u32			last_psn;
	u32			cur_psn;
	enum rdatm_res_state	state;

	union {
		struct {
			struct sk_buff	*skb;
		} atomic;
		struct {
			struct rxe_mr	*mr;
			u64		va_org;
			u32		rkey;
			u32		length;
			u64		va;
			u32		resid;
		} read;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state	state;
	u32			msn;
	u32			psn;
	u32			ack_psn;
	int			opcode;
	int			drop_msg;
	int			goto_error;
	int			sent_psn_nak;
	enum ib_wc_status	status;
	u8			aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe	*wqe;

	/* RDMA read / atomic only */
	u64			va;
	u64			offset;
	struct rxe_mr		*mr;
	u32			resid;
	u32			rkey;
	u32			length;
	u64			atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe	wqe;
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;
};
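
/* A hedged sketch of how the responder ring above might advance (see
 * rxe_resp.c for the real logic; using max_dest_rd_atomic as the ring
 * size is an assumption here):
 *
 *	struct resp_res *res = &qp->resp.resources[qp->resp.res_head];
 *
 *	if (++qp->resp.res_head == qp->attr.max_dest_rd_atomic)
 *		qp->resp.res_head = 0;	// wrap, overwriting the oldest
 */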

struct rxe_qp {
	struct rxe_pool_entry	pelem;
	struct ib_qp		ibqp;
	struct ib_qp_attr	attr;
	unsigned int		valid;
	unsigned int		mtu;
	bool			is_user;

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;
	struct rxe_cq		*scq;
	struct rxe_cq		*rcq;

	enum ib_sig_type	sq_sig_type;

	struct rxe_sq		sq;
	struct rxe_rq		rq;

	struct socket		*sk;
	u32			dst_cookie;
	u16			src_port;

	struct rxe_av		pri_av;
	struct rxe_av		alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head	grp_list;
	spinlock_t		grp_lock; /* guard grp_list */

	struct sk_buff_head	req_pkts;
	struct sk_buff_head	resp_pkts;
	struct sk_buff_head	send_pkts;

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;

	atomic_t		ssn;
	atomic_t		skb_out;
	int			need_req_skb;

	/* Timer for retransmitting packet when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;
};
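
/* A hedged sketch of how the retransmit timer above is armed (the
 * exact call site in rxe_comp.c is an assumption here):
 *
 *	mod_timer(&qp->retrans_timer,
 *		  jiffies + qp->qp_timeout_jiffies);
 *
 * mod_timer() on an already-pending timer simply moves its expiry,
 * which is how a received ACK "resets" it.
 */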

enum rxe_mr_state {
	RXE_MR_STATE_ZOMBIE,
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_type {
	RXE_MR_TYPE_NONE,
	RXE_MR_TYPE_DMA,
	RXE_MR_TYPE_MR,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64			addr;
	u64			size;
};

struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};
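
/* With 4 KiB pages and the 16-byte rxe_phys_buf above, RXE_BUF_PER_MAP
 * is 256, so an MR's two-level map addresses num_map * 256 physical
 * buffers while each rxe_map fits in a single page allocation.
 */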

static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}
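
/* The low 8 bits of an rkey are the consumer-owned key portion; the
 * upper 24 bits index the object pool, so a key is classified as an MW
 * key purely by which pool index range it falls into.
 */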

struct rxe_mr {
	struct rxe_pool_entry	pelem;
	struct ib_mr		ibmr;

	struct ib_umem		*umem;

	enum rxe_mr_state	state;
	enum rxe_mr_type	type;
	u64			va;
	u64			iova;
	size_t			length;
	u32			offset;
	int			access;

	int			page_shift;
	int			page_mask;
	int			map_shift;
	int			map_mask;

	u32			num_buf;
	u32			nbuf;

	u32			max_buf;
	u32			num_map;

	atomic_t		num_mw;

	struct rxe_map		**map;
};

enum rxe_mw_state {
	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw		ibmw;
	struct rxe_pool_entry	pelem;
	spinlock_t		lock;
	enum rxe_mw_state	state;
	struct rxe_qp		*qp; /* Type 2 only */
	struct rxe_mr		*mr;
	int			access;
	u64			addr;
	u64			length;
};

struct rxe_mc_grp {
	struct rxe_pool_entry	pelem;
	spinlock_t		mcg_lock; /* guard group */
	struct rxe_dev		*rxe;
	struct list_head	qp_list;
	union ib_gid		mgid;
	int			num_qp;
	u32			qkey;
	u16			pkey;
};

struct rxe_mc_elem {
	struct rxe_pool_entry	pelem;
	struct list_head	qp_list;
	struct list_head	grp_list;
	struct rxe_qp		*qp;
	struct rxe_mc_grp	*grp;
};

struct rxe_port {
	struct ib_port_attr	attr;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;
	/* special QPs */
	u32			qp_smi_index;
	u32			qp_gsi_index;
};

struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	int			max_ucontext;
	int			max_inline_data;
	struct mutex		usdev_lock;

	struct net_device	*ndev;

	int			xmit_errors;

	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;
	struct rxe_pool		mc_elem_pool;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
	u64			mmap_offset;

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct crypto_shash	*tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}
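
/* Hedged usage example, assuming one of the counter ids declared in
 * rxe_hw_counters.h:
 *
 *	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 *
 * atomic64_inc() keeps the per-device counters consistent without
 * taking a lock on the hot path.
 */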

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
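
/* Each to_*() helper above recovers the rxe wrapper from the embedded
 * ib_* member via container_of(), returning NULL for a NULL input,
 * e.g.:
 *
 *	struct rxe_qp *qp = to_rqp(ibqp);
 */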

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline u32 mr_lkey(struct rxe_mr *mr)
{
	return mr->ibmr.lkey;
}

static inline u32 mr_rkey(struct rxe_mr *mr)
{
	return mr->ibmr.rkey;
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

static inline u32 rxe_mw_rkey(struct rxe_mw *mw)
{
	return mw->ibmw.rkey;
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

#endif /* RXE_VERBS_H */