/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX       0x80
#define RVT_KDETH_QP_SUFFIX       0xffff
#define RVT_KDETH_QP_PREFIX_MASK  0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE         (u32)(RVT_KDETH_QP_PREFIX << \
					RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX          (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX       0x81
#define RVT_AIP_QP_SUFFIX       0xffff
#define RVT_AIP_QP_PREFIX_MASK  0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << \
				      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX          (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)

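/*
 * Illustrative use of the prefixes above (a sketch, not part of the
 * upstream API; the helper names are hypothetical):
 *
 *	static inline bool qpn_is_kdeth(u32 qpn)
 *	{
 *		return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
 *	}
 *
 *	static inline bool qpn_is_aip(u32 qpn)
 *	{
 *		return (qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE;
 *	}
 */
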
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK
 */
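
/*
 * For example, a driver could define its first private send flag from the
 * top bit downward (illustrative only; the name below is hypothetical):
 *
 *	#define MY_DRV_S_PRIVATE	BIT(31)
 *
 * which keeps driver bits clear of the rdmavt-owned range up to
 * RVT_S_MAX_BIT_MASK.
 */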

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

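/*
 * These flags are consumed by indexing ib_rvt_state_ops[] with the current
 * QP state, e.g. (illustrative of how rdmavt gates a post send):
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */
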
/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

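/*
 * The ordering requirement above could be asserted at build time, e.g.
 * (an illustrative sketch, not present in the original header):
 *
 *	BUILD_BUG_ON(offsetof(struct rvt_ud_wr, wr) != 0);
 */
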
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for kernel
 * mode user.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: data structure for the receive queue
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return - total number of entries in the receive queue
 */

static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}

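/*
 * For example (illustrative; locking elided), the number of RWQEs
 * currently posted on a kernel receive queue is:
 *
 *	count = rvt_get_rq_count(&qp->r_rq, qp->r_rq.kwq->head,
 *				 qp->r_rq.kwq->tail);
 */
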
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can support its own, potentially
 * different, set of operations.
 *
 **/

struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

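/*
 * An illustrative table entry (a sketch of how a driver might fill its
 * RVT_OPERATION_MAX-sized parameter table; the exact table contents are
 * driver specific):
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 */
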
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;         /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge *r_sg_list /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

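/*
 * Worked example: MSNs are 24 bits and wrap. rvt_cmp_msn(0x000001, 0xffffff)
 * computes (1 - 0xffffff) << 8, which is 0x00000200 in two's complement,
 * a positive result that correctly orders 0x000001 after 0xffffff across
 * the wrap point.
 */
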
__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - shift based mtu divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

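/*
 * For example, with a 4096-byte pmtu (log_pmtu == 12) and len == 5000:
 * rvt_div_round_up_mtu() yields (5000 + 4095) >> 12 == 2 packets, while
 * rvt_div_mtu() yields 5000 >> 12 == 1 full-MTU packet.
 */
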
/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}

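/*
 * This implements the IBTA Local ACK Timeout formula, 4.096 usec * 2^timeout.
 * For example, timeout == 14 gives 4.096 usec * 16384, roughly 67 msec,
 * rounded to jiffies.
 */
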
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}

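/*
 * Typical use (illustrative): take the RCU read lock around the lookup and
 * hold a reference before the lock is dropped if the QP will be used later:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 */
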
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to completion queue
 *			by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for the rvt_cq_enter() call by the
 * receive queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to completion queue
 *                        by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for the rvt_cq_enter() call by the
 * send queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion queue if the qp indicates
 * it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI, GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get tail
 * of cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get head
 * of cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rvt_rq: request queue data structure
 *
 * This function should only be called if rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}

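/*
 * Illustrative use in a driver's RC request engine (a sketch; the
 * surrounding label is hypothetical):
 *
 *	if (!rvt_rc_credit_avail(qp, wqe))
 *		goto bail;	// RVT_S_WAIT_SSN_CREDIT is now set
 */
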
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */