#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1
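
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * r_aflags is an unsigned long manipulated with the atomic bitops API so
 * that racing contexts can each claim a bit exactly once. The helper name
 * is hypothetical.
 */
static inline int rvt_example_claim_wrid(unsigned long *r_aflags)
{
	/* Only the caller that saw the bit set owns the completion. */
	return test_and_clear_bit(RVT_R_WRID_VALID, r_aflags);
}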

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if only signaled send WRs generate completions
 *                         (IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for the RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request an ACK, then wait for the ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_PIO_DRAIN	0x0800
#define RVT_S_WAIT_TX		0x1000
#define RVT_S_WAIT_DMA_DESC	0x2000
#define RVT_S_WAIT_KMEM		0x4000
#define RVT_S_WAIT_PSN		0x8000
#define RVT_S_WAIT_ACK		0x10000
#define RVT_S_SEND_ONE		0x20000
#define RVT_S_UNLIMITED_CREDIT	0x40000
#define RVT_S_AHG_VALID		0x80000
#define RVT_S_AHG_CLEAR		0x100000
#define RVT_S_ECN		0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
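
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * a driver's send engine typically tests these composite masks before
 * (re)scheduling a QP. The helper name is hypothetical.
 */
static inline int rvt_example_send_blocked(u32 s_flags)
{
	/* Any set wait bit means no packet of any kind may be sent. */
	return !!(s_flags & RVT_S_ANY_WAIT);
}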

/* Mask of the high-order opcode bits used to check the QP type */
#define RVT_OPCODE_QP_MASK 0xE0
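
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * the masked high-order opcode bits are compared against the QP's
 * allowed_ops field (see struct rvt_qp below). The helper name is
 * hypothetical.
 */
static inline int rvt_example_opcode_allowed(u8 opcode, u8 allowed_ops)
{
	return (opcode & RVT_OPCODE_QP_MASK) == allowed_ops;
}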

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
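
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * post and progress paths look up the flags for the current QP state in
 * ib_rvt_state_ops[] (declared at the end of this file) and test the
 * relevant bit. The helper name is hypothetical.
 */
static inline int rvt_example_post_send_ok(int state_ops)
{
	return !!(state_ops & RVT_POST_SEND_OK);
}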

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};
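
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * head and tail index a ring of 'size' RWQEs; since the size need not be
 * a power of two, advancing wraps explicitly rather than masking. The
 * helper name is hypothetical.
 */
static inline u32 rvt_example_ring_next(u32 index, u32 size)
{
	return (index + 1 >= size) ? 0 : index + 1;
}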

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can have a potentially different
 * set of operations.
 **/
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
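
/*
 * Sketch of a table entry (illustrative addition, not part of the original
 * header): drivers build an array indexed by ib_wr_opcode, one entry per
 * supported operation. The entry below is hypothetical; the real tables
 * live in the drivers.
 */
static const struct rvt_operation_params rvt_example_rdma_write_op = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = 0,
};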

/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order, which only happens in modify_qp() or when changing the
 * QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};
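
/*
 * Locking sketch (illustrative addition, not part of the original header):
 * when both receive and send sides must be quiesced, as for a state
 * change, the locks above are taken in a fixed order: r_lock first, then
 * the post-send and send locks. The helper name is hypothetical.
 */
static inline void rvt_example_lock_qp(struct rvt_qp *qp)
{
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
}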

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    0xFFFFFF

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};
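
/*
 * Mapping sketch (illustrative addition, not part of the original header):
 * a QPN selects one lazily-allocated page and a bit within it. The helper
 * names are hypothetical.
 */
static inline u32 rvt_example_qpn_to_page(u32 qpn)
{
	return qpn / RVT_BITS_PER_PAGE;		/* index into map[] */
}

static inline u32 rvt_example_qpn_to_bit(u32 qpn)
{
	return qpn & RVT_BITS_PER_PAGE_MASK;	/* bit within that page */
}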

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
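
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * ring indices such as s_tail are always translated through the helper
 * above instead of plain array indexing. The wrapper name is hypothetical.
 */
static inline struct rvt_swqe *rvt_example_tail_wqe(struct rvt_qp *qp)
{
	return rvt_get_swqe_ptr(qp, qp->s_tail);
}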

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to mark a WQE
 * as consuming a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the decrement of s_reserved_used is not reordered
 * with the update of the s_last ring index.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		atomic_dec(&qp->s_reserved_used);
		/* ensure no reordering up to the s_last change */
		smp_mb__after_atomic();
	}
}
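
/*
 * Pairing sketch (illustrative addition, not part of the original header):
 * a completion path releases the reserved slot before advancing s_last,
 * so the post-send side computes a stable s_avail. The helper name is
 * hypothetical.
 */
static inline void rvt_example_retire_swqe(struct rvt_qp *qp,
					   struct rvt_swqe *wqe)
{
	rvt_qp_wqe_unreserve(qp, wqe);	/* must precede the s_last update */
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;
}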

extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

#endif          /* DEF_RDMAVT_INCQP_H */