#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if the QP was created with IB_SIGNAL_REQ_WR,
 *                       i.e. only signaled send WRs generate completions
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response (e.g. RDMA read data) is pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - SSN credit limiting is disabled for this QP
 * RVT_S_AHG_VALID - the cached header used for header generation is valid
 * RVT_S_AHG_CLEAR - the cached header generation state must be cleared
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_PIO_DRAIN    0x0800
#define RVT_S_WAIT_TX		0x1000
#define RVT_S_WAIT_DMA_DESC	0x2000
#define RVT_S_WAIT_KMEM		0x4000
#define RVT_S_WAIT_PSN		0x8000
#define RVT_S_WAIT_ACK		0x10000
#define RVT_S_SEND_ONE		0x20000
#define RVT_S_UNLIMITED_CREDIT	0x40000
#define RVT_S_AHG_VALID		0x80000
#define RVT_S_AHG_CLEAR		0x100000
#define RVT_S_ECN		0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

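/*
 * Illustrative sketch (not part of this header's API): rdmavt and its
 * drivers gate packet processing on these bits by indexing
 * ib_rvt_state_ops[] with the current QP state, e.g. a post-send path
 * typically bails out early with:
 *
 *	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK)))
 *		return -EINVAL;
 */
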
/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length: the length to copy into the swqe entry
 * @qpt_support: a bit mask indicating QP type support
 * @flags: RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can support a potentially different
 * set of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

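/*
 * Illustrative example of a table-driven post send entry: a driver
 * could describe RDMA WRITE support for UC and RC QPs with an entry
 * along these lines (a sketch, not a required form):
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *
 * The post send code can then copy .length bytes of the WR into the
 * SWQE and reject the opcode on QP types absent from .qpt_support.
 */
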
/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; taking both only happens in modify_qp() or when changing
 * the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ACK count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

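/*
 * Sketch of the QPN-to-bitmap mapping implied by the definitions above
 * (qpn_to_map() is a hypothetical helper, for illustration only):
 *
 *	static inline struct rvt_qpn_map *qpn_to_map(
 *		struct rvt_qpn_table *qpt, u32 qpn)
 *	{
 *		return &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	}
 *
 * The bit for a QPN within that page is (qpn & RVT_BITS_PER_PAGE_MASK),
 * so each map[] entry covers RVT_BITS_PER_PAGE consecutive QPNs.
 */
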
struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}

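/*
 * Each s_wq element occupies sizeof(struct rvt_swqe) plus
 * qp->s_max_sge * sizeof(struct rvt_sge) bytes, which is why the helper
 * above is needed. A typical (illustrative) use when completing sends:
 *
 *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 */
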
/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

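/*
 * Illustrative use: consume the receive WQE at the ring tail, where
 * tail has been read (and is advanced) under r_rq.lock:
 *
 *	struct rvt_rwqe *wqe = rvt_get_rwqe_ptr(&qp->r_rq, tail);
 */
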
/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp: the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp: the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp: the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

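/*
 * Typical reference pattern (illustrative): take a reference before
 * handing the QP to asynchronous work and drop it when done; the final
 * rvt_put_qp() wakes anyone sleeping on qp->wait (e.g. QP destroy):
 *
 *	rvt_get_qp(qp);
 *	... queue qp for deferred processing ...
 *	rvt_put_qp(qp);
 */
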
/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe: the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * This routine is used in the post send path to record
 * that a WQE makes use of a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the decrement of
 * s_reserved_used with respect to the s_last
 * ring index update.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-ordering up to the s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp: the qp
 * @wqe: the send wqe
 * @opcode: the completion opcode
 * @status: completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_opcode opcode,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	     status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = opcode;
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

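/*
 * The shift moves bit 23 of the 24-bit difference into the sign bit,
 * so the comparison stays correct across 24-bit wraparound. For
 * example, rvt_cmp_msn(0x000000, 0xFFFFFF) > 0, reflecting that MSN 0
 * comes immediately after 0xFFFFFF in sequence space.
 */
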
/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - process the credit information in an AETH
 * @qp: the QP whose send work queue the credits apply to
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_div_round_up_mtu - MTU round-up divide
 * @qp: the QP
 * @len: the length to divide
 *
 * Perform a shift-based MTU round-up divide.
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

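/*
 * Example (illustrative): with pmtu = 4096 (log_pmtu = 12), a 10000
 * byte message needs rvt_div_round_up_mtu(qp, 10000) = 3 packets,
 * since (10000 + 4095) >> 12 == 3.
 */
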
/**
 * rvt_div_mtu - MTU divide
 * @qp: the QP
 * @len: the length to divide
 *
 * Perform a shift-based MTU divide.
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout: timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}

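/*
 * This implements the IBTA Local ACK Timeout encoding, nominally
 * 4.096 usec * 2^timeout. For example, timeout = 14 yields roughly
 * 4.096 usec * 16384 ~= 67 msec worth of jiffies.
 */
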
extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp: the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI/GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);

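/*
 * Illustrative iteration sketch: sweep every QP on a device with the
 * callback form (example_cb is a hypothetical function):
 *
 *	static void example_cb(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x\n", qp->ibqp.qp_num);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, example_cb);
 *
 * Alternatively, rvt_qp_iter_init()/rvt_qp_iter_next() drive the same
 * walk step by step, with rvt_qp_iter_next() returning non-zero once
 * the QPs are exhausted, as driver debugfs seq_file code does.
 */
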
#endif          /* DEF_RDMAVT_INCQP_H */