1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Copyright(c) 2018 Intel Corporation.
4  *
5  */
6 
7 #include "hfi.h"
8 #include "qp.h"
9 #include "rc.h"
10 #include "verbs.h"
11 #include "tid_rdma.h"
12 #include "exp_rcv.h"
13 #include "trace.h"
14 
15 /**
16  * DOC: TID RDMA READ protocol
17  *
18  * This is an end-to-end protocol at the hfi1 level between two nodes that
19  * improves performance by avoiding data copy on the requester side. It
20  * converts a qualified RDMA READ request into a TID RDMA READ request on
21  * the requester side and thereafter handles the request and response
22  * differently. To be qualified, the RDMA READ request should meet the
23  * following:
24  * -- The total data length should be greater than 256K;
25  * -- The total data length should be a multiple of 4K page size;
26  * -- Each local scatter-gather entry should be 4K page aligned;
27  * -- Each local scatter-gather entry should be a multiple of 4K page size;
28  */
29 
30 #define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
31 #define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
32 #define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
33 #define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
34 #define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
35 #define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)
36 
37 /* Maximum number of packets within a flow generation. */
38 #define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)
39 
40 #define GENERATION_MASK 0xFFFFF
41 
42 static u32 mask_generation(u32 a)
43 {
44 	return a & GENERATION_MASK;
45 }
46 
47 /* Reserved generation value to set to unused flows for kernel contexts */
48 #define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
49 
50 /*
51  * J_KEY for kernel contexts when TID RDMA is used.
52  * See generate_jkey() in hfi.h for more information.
53  */
54 #define TID_RDMA_JKEY                   32
55 #define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
56 #define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)
57 
58 /* Maximum number of segments in flight per QP request. */
59 #define TID_RDMA_MAX_READ_SEGS_PER_REQ  6
60 #define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
61 #define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
62 			TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
63 #define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)
64 
65 #define MAX_EXPECTED_PAGES     (MAX_EXPECTED_BUFFER / PAGE_SIZE)
66 
67 #define TID_RDMA_DESTQP_FLOW_SHIFT      11
68 #define TID_RDMA_DESTQP_FLOW_MASK       0x1f
69 
70 #define TID_OPFN_QP_CTXT_MASK 0xff
71 #define TID_OPFN_QP_CTXT_SHIFT 56
72 #define TID_OPFN_QP_KDETH_MASK 0xff
73 #define TID_OPFN_QP_KDETH_SHIFT 48
74 #define TID_OPFN_MAX_LEN_MASK 0x7ff
75 #define TID_OPFN_MAX_LEN_SHIFT 37
76 #define TID_OPFN_TIMEOUT_MASK 0x1f
77 #define TID_OPFN_TIMEOUT_SHIFT 32
78 #define TID_OPFN_RESERVED_MASK 0x3f
79 #define TID_OPFN_RESERVED_SHIFT 26
80 #define TID_OPFN_URG_MASK 0x1
81 #define TID_OPFN_URG_SHIFT 25
82 #define TID_OPFN_VER_MASK 0x7
83 #define TID_OPFN_VER_SHIFT 22
84 #define TID_OPFN_JKEY_MASK 0x3f
85 #define TID_OPFN_JKEY_SHIFT 16
86 #define TID_OPFN_MAX_READ_MASK 0x3f
87 #define TID_OPFN_MAX_READ_SHIFT 10
88 #define TID_OPFN_MAX_WRITE_MASK 0x3f
89 #define TID_OPFN_MAX_WRITE_SHIFT 4
90 
91 /*
92  * OPFN TID layout
93  *
94  * 63               47               31               15
95  * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
96  * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
97  * N - the context Number
98  * K - the Kdeth_qp
99  * M - Max_len
100  * T - Timeout
101  * D - reserveD
102  * V - version
103  * U - Urg capable
104  * J - Jkey
105  * R - max_Read
106  * W - max_Write
107  * C - Capcode
108  */
109 
110 static u32 tid_rdma_flow_wt;
111 
112 static void tid_rdma_trigger_resume(struct work_struct *work);
113 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
114 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
115 					 gfp_t gfp);
116 static void hfi1_init_trdma_req(struct rvt_qp *qp,
117 				struct tid_rdma_request *req);
118 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
119 static void hfi1_tid_timeout(struct timer_list *t);
120 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
121 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
122 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
123 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
124 static void hfi1_tid_retry_timeout(struct timer_list *t);
125 static int make_tid_rdma_ack(struct rvt_qp *qp,
126 			     struct ib_other_headers *ohdr,
127 			     struct hfi1_pkt_state *ps);
128 static void hfi1_do_tid_send(struct rvt_qp *qp);
129 static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
130 static void tid_rdma_rcv_err(struct hfi1_packet *packet,
131 			     struct ib_other_headers *ohdr,
132 			     struct rvt_qp *qp, u32 psn, int diff, bool fecn);
133 static void update_r_next_psn_fecn(struct hfi1_packet *packet,
134 				   struct hfi1_qp_priv *priv,
135 				   struct hfi1_ctxtdata *rcd,
136 				   struct tid_rdma_flow *flow,
137 				   bool fecn);
138 
139 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
140 {
141 	return
142 		(((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
143 			TID_OPFN_QP_CTXT_SHIFT) |
144 		((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
145 			TID_OPFN_QP_KDETH_SHIFT) |
146 		(((u64)((p->max_len >> PAGE_SHIFT) - 1) &
147 			TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
148 		(((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
149 			TID_OPFN_TIMEOUT_SHIFT) |
150 		(((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
151 		(((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
152 		(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
153 			TID_OPFN_MAX_READ_SHIFT) |
154 		(((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
155 			TID_OPFN_MAX_WRITE_SHIFT);
156 }
157 
158 static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
159 {
160 	p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
161 		TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
162 	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
163 	p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
164 		TID_OPFN_MAX_WRITE_MASK;
165 	p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
166 		TID_OPFN_MAX_READ_MASK;
167 	p->qp =
168 		((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
169 			<< 16) |
170 		((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
171 	p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
172 	p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
173 }
174 
175 void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
176 {
177 	struct hfi1_qp_priv *priv = qp->priv;
178 
179 	p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
180 	p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
181 	p->jkey = priv->rcd->jkey;
182 	p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
183 	p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
184 	p->timeout = qp->timeout;
185 	p->urg = is_urg_masked(priv->rcd);
186 }
187 
188 bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
189 {
190 	struct hfi1_qp_priv *priv = qp->priv;
191 
192 	*data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
193 	return true;
194 }
195 
196 bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
197 {
198 	struct hfi1_qp_priv *priv = qp->priv;
199 	struct tid_rdma_params *remote, *old;
200 	bool ret = true;
201 
202 	old = rcu_dereference_protected(priv->tid_rdma.remote,
203 					lockdep_is_held(&priv->opfn.lock));
204 	data &= ~0xfULL;
205 	/*
206 	 * If data passed in is zero, return true so as not to continue the
207 	 * negotiation process
208 	 */
209 	if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
210 		goto null;
211 	/*
212 	 * If kzalloc fails, return false. This will result in:
213 	 * * at the requester a new OPFN request being generated to retry
214 	 *   the negotiation
215 	 * * at the responder, 0 being returned to the requester so as to
216 	 *   disable TID RDMA at both the requester and the responder
217 	 */
218 	remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
219 	if (!remote) {
220 		ret = false;
221 		goto null;
222 	}
223 
224 	tid_rdma_opfn_decode(remote, data);
225 	priv->tid_timer_timeout_jiffies =
226 		usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
227 				   1000UL) << 3) * 7);
228 	trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
229 	trace_hfi1_opfn_param(qp, 1, remote);
230 	rcu_assign_pointer(priv->tid_rdma.remote, remote);
231 	/*
232 	 * A TID RDMA READ request's segment size is not equal to
233 	 * remote->max_len only when the request's data length is smaller
234 	 * than remote->max_len. In that case, there will be only one segment.
235 	 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
236 	 * during retry, it will lead to req->cur_seg = 0, which is exactly
237 	 * what is expected.
238 	 */
239 	priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
240 	priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
241 	goto free;
242 null:
243 	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
244 	priv->timeout_shift = 0;
245 free:
246 	if (old)
247 		kfree_rcu(old, rcu_head);
248 	return ret;
249 }
250 
251 bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
252 {
253 	bool ret;
254 
255 	ret = tid_rdma_conn_reply(qp, *data);
256 	*data = 0;
257 	/*
258 	 * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate
259 	 * TID RDMA could not be enabled. This will result in TID RDMA being
260 	 * disabled at the requester too.
261 	 */
262 	if (ret)
263 		(void)tid_rdma_conn_req(qp, data);
264 	return ret;
265 }
266 
267 void tid_rdma_conn_error(struct rvt_qp *qp)
268 {
269 	struct hfi1_qp_priv *priv = qp->priv;
270 	struct tid_rdma_params *old;
271 
272 	old = rcu_dereference_protected(priv->tid_rdma.remote,
273 					lockdep_is_held(&priv->opfn.lock));
274 	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
275 	if (old)
276 		kfree_rcu(old, rcu_head);
277 }
278 
279 /* This is called at context initialization time */
280 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
281 {
282 	if (reinit)
283 		return 0;
284 
285 	BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
286 	BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
287 	rcd->jkey = TID_RDMA_JKEY;
288 	hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
289 	return hfi1_alloc_ctxt_rcv_groups(rcd);
290 }
291 
292 /**
293  * qp_to_rcd - determine the receive context used by a qp
294  * @qp - the qp
295  *
296  * This routine returns the receive context associated
297  * with a a qp's qpn.
298  *
299  * Returns the context.
300  */
301 static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
302 				       struct rvt_qp *qp)
303 {
304 	struct hfi1_ibdev *verbs_dev = container_of(rdi,
305 						    struct hfi1_ibdev,
306 						    rdi);
307 	struct hfi1_devdata *dd = container_of(verbs_dev,
308 					       struct hfi1_devdata,
309 					       verbs_dev);
310 	unsigned int ctxt;
311 
312 	if (qp->ibqp.qp_num == 0)
313 		ctxt = 0;
314 	else
315 		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
316 	return dd->rcd[ctxt];
317 }
318 
319 int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
320 		      struct ib_qp_init_attr *init_attr)
321 {
322 	struct hfi1_qp_priv *qpriv = qp->priv;
323 	int i, ret;
324 
325 	qpriv->rcd = qp_to_rcd(rdi, qp);
326 
327 	spin_lock_init(&qpriv->opfn.lock);
328 	INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
329 	INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
330 	qpriv->flow_state.psn = 0;
331 	qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
332 	qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
333 	qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
334 	qpriv->s_state = TID_OP(WRITE_RESP);
335 	qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
336 	qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
337 	qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
338 	qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
339 	qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
340 	qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
341 	qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
342 	qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
343 	atomic_set(&qpriv->n_requests, 0);
344 	atomic_set(&qpriv->n_tid_requests, 0);
345 	timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
346 	timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
347 	INIT_LIST_HEAD(&qpriv->tid_wait);
348 
349 	if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
350 		struct hfi1_devdata *dd = qpriv->rcd->dd;
351 
352 		qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
353 						sizeof(*qpriv->pages),
354 					    GFP_KERNEL, dd->node);
355 		if (!qpriv->pages)
356 			return -ENOMEM;
357 		for (i = 0; i < qp->s_size; i++) {
358 			struct hfi1_swqe_priv *priv;
359 			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
360 
361 			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
362 					    dd->node);
363 			if (!priv)
364 				return -ENOMEM;
365 
366 			hfi1_init_trdma_req(qp, &priv->tid_req);
367 			priv->tid_req.e.swqe = wqe;
368 			wqe->priv = priv;
369 		}
370 		for (i = 0; i < rvt_max_atomic(rdi); i++) {
371 			struct hfi1_ack_priv *priv;
372 
373 			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
374 					    dd->node);
375 			if (!priv)
376 				return -ENOMEM;
377 
378 			hfi1_init_trdma_req(qp, &priv->tid_req);
379 			priv->tid_req.e.ack = &qp->s_ack_queue[i];
380 
381 			ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
382 							    GFP_KERNEL);
383 			if (ret) {
384 				kfree(priv);
385 				return ret;
386 			}
387 			qp->s_ack_queue[i].priv = priv;
388 		}
389 	}
390 
391 	return 0;
392 }
393 
394 void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
395 {
396 	struct hfi1_qp_priv *qpriv = qp->priv;
397 	struct rvt_swqe *wqe;
398 	u32 i;
399 
400 	if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
401 		for (i = 0; i < qp->s_size; i++) {
402 			wqe = rvt_get_swqe_ptr(qp, i);
403 			kfree(wqe->priv);
404 			wqe->priv = NULL;
405 		}
406 		for (i = 0; i < rvt_max_atomic(rdi); i++) {
407 			struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;
408 
409 			if (priv)
410 				hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
411 			kfree(priv);
412 			qp->s_ack_queue[i].priv = NULL;
413 		}
414 		cancel_work_sync(&qpriv->opfn.opfn_work);
415 		kfree(qpriv->pages);
416 		qpriv->pages = NULL;
417 	}
418 }
419 
420 /* Flow and tid waiter functions */
421 /**
422  * DOC: lock ordering
423  *
424  * There are two locks involved with the queuing
425  * routines: the qp s_lock and the exp_lock.
426  *
427  * Since the tid space allocation is called from
428  * the send engine, the qp s_lock is already held.
429  *
430  * The allocation routines will get the exp_lock.
431  *
432  * The first_qp() call is provided to allow the head of
433  * the rcd wait queue to be fetched under the exp_lock and
434  * followed by a drop of the exp_lock.
435  *
436  * Any qp in the wait list will have the qp reference count held
437  * to hold the qp in memory.
438  */
439 
440 /*
441  * return head of rcd wait list
442  *
443  * Must hold the exp_lock.
444  *
445  * Get a reference to the QP to hold the QP in memory.
446  *
447  * The caller must release the reference when the local
448  * is no longer being used.
449  */
450 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
451 			       struct tid_queue *queue)
452 	__must_hold(&rcd->exp_lock)
453 {
454 	struct hfi1_qp_priv *priv;
455 
456 	lockdep_assert_held(&rcd->exp_lock);
457 	priv = list_first_entry_or_null(&queue->queue_head,
458 					struct hfi1_qp_priv,
459 					tid_wait);
460 	if (!priv)
461 		return NULL;
462 	rvt_get_qp(priv->owner);
463 	return priv->owner;
464 }
465 
466 /**
467  * kernel_tid_waiters - determine rcd wait
468  * @rcd: the receive context
469  * @qp: the head of the qp being processed
470  *
471  * This routine will return false IFF
472  * the list is NULL or the head of the
473  * list is the indicated qp.
474  *
475  * Must hold the qp s_lock and the exp_lock.
476  *
477  * Return:
478  * false if either of the conditions below are satisfied:
479  * 1. The list is empty or
480  * 2. The indicated qp is at the head of the list and the
481  *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
482  * true is returned otherwise.
483  */
484 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
485 			       struct tid_queue *queue, struct rvt_qp *qp)
486 	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
487 {
488 	struct rvt_qp *fqp;
489 	bool ret = true;
490 
491 	lockdep_assert_held(&qp->s_lock);
492 	lockdep_assert_held(&rcd->exp_lock);
493 	fqp = first_qp(rcd, queue);
494 	if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
495 		ret = false;
496 	rvt_put_qp(fqp);
497 	return ret;
498 }
499 
500 /**
501  * dequeue_tid_waiter - dequeue the qp from the list
502  * @qp - the qp to remove the wait list
503  *
504  * This routine removes the indicated qp from the
505  * wait list if it is there.
506  *
507  * This should be done after the hardware flow and
508  * tid array resources have been allocated.
509  *
510  * Must hold the qp s_lock and the rcd exp_lock.
511  *
512  * It assumes the s_lock to protect the s_flags
513  * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
514  */
515 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
516 			       struct tid_queue *queue, struct rvt_qp *qp)
517 	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
518 {
519 	struct hfi1_qp_priv *priv = qp->priv;
520 
521 	lockdep_assert_held(&qp->s_lock);
522 	lockdep_assert_held(&rcd->exp_lock);
523 	if (list_empty(&priv->tid_wait))
524 		return;
525 	list_del_init(&priv->tid_wait);
526 	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
527 	queue->dequeue++;
528 	rvt_put_qp(qp);
529 }
530 
531 /**
532  * queue_qp_for_tid_wait - suspend QP on tid space
533  * @rcd: the receive context
534  * @qp: the qp
535  *
536  * The qp is inserted at the tail of the rcd
537  * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
538  *
539  * Must hold the qp s_lock and the exp_lock.
540  */
541 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
542 				  struct tid_queue *queue, struct rvt_qp *qp)
543 	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
544 {
545 	struct hfi1_qp_priv *priv = qp->priv;
546 
547 	lockdep_assert_held(&qp->s_lock);
548 	lockdep_assert_held(&rcd->exp_lock);
549 	if (list_empty(&priv->tid_wait)) {
550 		qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
551 		list_add_tail(&priv->tid_wait, &queue->queue_head);
552 		priv->tid_enqueue = ++queue->enqueue;
553 		rcd->dd->verbs_dev.n_tidwait++;
554 		trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
555 		rvt_get_qp(qp);
556 	}
557 }
558 
559 /**
560  * __trigger_tid_waiter - trigger tid waiter
561  * @qp: the qp
562  *
563  * This is a private entrance to schedule the qp
564  * assuming the caller is holding the qp->s_lock.
565  */
566 static void __trigger_tid_waiter(struct rvt_qp *qp)
567 	__must_hold(&qp->s_lock)
568 {
569 	lockdep_assert_held(&qp->s_lock);
570 	if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
571 		return;
572 	trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
573 	hfi1_schedule_send(qp);
574 }
575 
576 /**
577  * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
578  * @qp - the qp
579  *
580  * trigger a schedule or a waiting qp in a deadlock
581  * safe manner.  The qp reference is held prior
582  * to this call via first_qp().
583  *
584  * If the qp trigger was already scheduled (!rval)
585  * the the reference is dropped, otherwise the resume
586  * or the destroy cancel will dispatch the reference.
587  */
588 static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
589 {
590 	struct hfi1_qp_priv *priv;
591 	struct hfi1_ibport *ibp;
592 	struct hfi1_pportdata *ppd;
593 	struct hfi1_devdata *dd;
594 	bool rval;
595 
596 	if (!qp)
597 		return;
598 
599 	priv = qp->priv;
600 	ibp = to_iport(qp->ibqp.device, qp->port_num);
601 	ppd = ppd_from_ibp(ibp);
602 	dd = dd_from_ibdev(qp->ibqp.device);
603 
604 	rval = queue_work_on(priv->s_sde ?
605 			     priv->s_sde->cpu :
606 			     cpumask_first(cpumask_of_node(dd->node)),
607 			     ppd->hfi1_wq,
608 			     &priv->tid_rdma.trigger_work);
609 	if (!rval)
610 		rvt_put_qp(qp);
611 }
612 
613 /**
614  * tid_rdma_trigger_resume - field a trigger work request
615  * @work - the work item
616  *
617  * Complete the off qp trigger processing by directly
618  * calling the progress routine.
619  */
620 static void tid_rdma_trigger_resume(struct work_struct *work)
621 {
622 	struct tid_rdma_qp_params *tr;
623 	struct hfi1_qp_priv *priv;
624 	struct rvt_qp *qp;
625 
626 	tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
627 	priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
628 	qp = priv->owner;
629 	spin_lock_irq(&qp->s_lock);
630 	if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
631 		spin_unlock_irq(&qp->s_lock);
632 		hfi1_do_send(priv->owner, true);
633 	} else {
634 		spin_unlock_irq(&qp->s_lock);
635 	}
636 	rvt_put_qp(qp);
637 }
638 
639 /**
640  * tid_rdma_flush_wait - unwind any tid space wait
641  *
642  * This is called when resetting a qp to
643  * allow a destroy or reset to get rid
644  * of any tid space linkage and reference counts.
645  */
646 static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
647 	__must_hold(&qp->s_lock)
648 {
649 	struct hfi1_qp_priv *priv;
650 
651 	if (!qp)
652 		return;
653 	lockdep_assert_held(&qp->s_lock);
654 	priv = qp->priv;
655 	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
656 	spin_lock(&priv->rcd->exp_lock);
657 	if (!list_empty(&priv->tid_wait)) {
658 		list_del_init(&priv->tid_wait);
659 		qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
660 		queue->dequeue++;
661 		rvt_put_qp(qp);
662 	}
663 	spin_unlock(&priv->rcd->exp_lock);
664 }
665 
666 void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
667 	__must_hold(&qp->s_lock)
668 {
669 	struct hfi1_qp_priv *priv = qp->priv;
670 
671 	_tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
672 	_tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
673 }
674 
675 /* Flow functions */
676 /**
677  * kern_reserve_flow - allocate a hardware flow
678  * @rcd - the context to use for allocation
679  * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
680  *         signify "don't care".
681  *
682  * Use a bit mask based allocation to reserve a hardware
683  * flow for use in receiving KDETH data packets. If a preferred flow is
684  * specified the function will attempt to reserve that flow again, if
685  * available.
686  *
687  * The exp_lock must be held.
688  *
689  * Return:
690  * On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1
691  * On failure: -EAGAIN
692  */
693 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
694 	__must_hold(&rcd->exp_lock)
695 {
696 	int nr;
697 
698 	/* Attempt to reserve the preferred flow index */
699 	if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
700 	    !test_and_set_bit(last, &rcd->flow_mask))
701 		return last;
702 
703 	nr = ffz(rcd->flow_mask);
704 	BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
705 		     (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
706 	if (nr > (RXE_NUM_TID_FLOWS - 1))
707 		return -EAGAIN;
708 	set_bit(nr, &rcd->flow_mask);
709 	return nr;
710 }
711 
712 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
713 			     u32 flow_idx)
714 {
715 	u64 reg;
716 
717 	reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
718 		RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
719 		RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
720 		RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
721 		RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
722 		RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;
723 
724 	if (generation != KERN_GENERATION_RESERVED)
725 		reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;
726 
727 	write_uctxt_csr(rcd->dd, rcd->ctxt,
728 			RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
729 }
730 
731 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
732 	__must_hold(&rcd->exp_lock)
733 {
734 	u32 generation = rcd->flows[flow_idx].generation;
735 
736 	kern_set_hw_flow(rcd, generation, flow_idx);
737 	return generation;
738 }
739 
740 static u32 kern_flow_generation_next(u32 gen)
741 {
742 	u32 generation = mask_generation(gen + 1);
743 
744 	if (generation == KERN_GENERATION_RESERVED)
745 		generation = mask_generation(generation + 1);
746 	return generation;
747 }
748 
749 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
750 	__must_hold(&rcd->exp_lock)
751 {
752 	rcd->flows[flow_idx].generation =
753 		kern_flow_generation_next(rcd->flows[flow_idx].generation);
754 	kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
755 }
756 
757 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
758 {
759 	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
760 	struct tid_flow_state *fs = &qpriv->flow_state;
761 	struct rvt_qp *fqp;
762 	unsigned long flags;
763 	int ret = 0;
764 
765 	/* The QP already has an allocated flow */
766 	if (fs->index != RXE_NUM_TID_FLOWS)
767 		return ret;
768 
769 	spin_lock_irqsave(&rcd->exp_lock, flags);
770 	if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
771 		goto queue;
772 
773 	ret = kern_reserve_flow(rcd, fs->last_index);
774 	if (ret < 0)
775 		goto queue;
776 	fs->index = ret;
777 	fs->last_index = fs->index;
778 
779 	/* Generation received in a RESYNC overrides default flow generation */
780 	if (fs->generation != KERN_GENERATION_RESERVED)
781 		rcd->flows[fs->index].generation = fs->generation;
782 	fs->generation = kern_setup_hw_flow(rcd, fs->index);
783 	fs->psn = 0;
784 	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
785 	/* get head before dropping lock */
786 	fqp = first_qp(rcd, &rcd->flow_queue);
787 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
788 
789 	tid_rdma_schedule_tid_wakeup(fqp);
790 	return 0;
791 queue:
792 	queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
793 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
794 	return -EAGAIN;
795 }
796 
797 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
798 {
799 	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
800 	struct tid_flow_state *fs = &qpriv->flow_state;
801 	struct rvt_qp *fqp;
802 	unsigned long flags;
803 
804 	if (fs->index >= RXE_NUM_TID_FLOWS)
805 		return;
806 	spin_lock_irqsave(&rcd->exp_lock, flags);
807 	kern_clear_hw_flow(rcd, fs->index);
808 	clear_bit(fs->index, &rcd->flow_mask);
809 	fs->index = RXE_NUM_TID_FLOWS;
810 	fs->psn = 0;
811 	fs->generation = KERN_GENERATION_RESERVED;
812 
813 	/* get head before dropping lock */
814 	fqp = first_qp(rcd, &rcd->flow_queue);
815 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
816 
817 	if (fqp == qp) {
818 		__trigger_tid_waiter(fqp);
819 		rvt_put_qp(fqp);
820 	} else {
821 		tid_rdma_schedule_tid_wakeup(fqp);
822 	}
823 }
824 
825 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
826 {
827 	int i;
828 
829 	for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
830 		rcd->flows[i].generation = mask_generation(prandom_u32());
831 		kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
832 	}
833 }
834 
835 /* TID allocation functions */
836 static u8 trdma_pset_order(struct tid_rdma_pageset *s)
837 {
838 	u8 count = s->count;
839 
840 	return ilog2(count) + 1;
841 }
842 
843 /**
844  * tid_rdma_find_phys_blocks_4k - get groups base on mr info
845  * @npages - number of pages
846  * @pages - pointer to an array of page structs
847  * @list - page set array to return
848  *
849  * This routine returns the number of groups associated with
850  * the current sge information.  This implementation is based
851  * on the expected receive find_phys_blocks() adjusted to
852  * use the MR information vs. the pfn.
853  *
854  * Return:
855  * the number of RcvArray entries
856  */
857 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
858 					struct page **pages,
859 					u32 npages,
860 					struct tid_rdma_pageset *list)
861 {
862 	u32 pagecount, pageidx, setcount = 0, i;
863 	void *vaddr, *this_vaddr;
864 
865 	if (!npages)
866 		return 0;
867 
868 	/*
869 	 * Look for sets of physically contiguous pages in the user buffer.
870 	 * This will allow us to optimize Expected RcvArray entry usage by
871 	 * using the bigger supported sizes.
872 	 */
873 	vaddr = page_address(pages[0]);
874 	trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
875 	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
876 		this_vaddr = i < npages ? page_address(pages[i]) : NULL;
877 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
878 					 this_vaddr);
879 		/*
880 		 * If the vaddr's are not sequential, pages are not physically
881 		 * contiguous.
882 		 */
883 		if (this_vaddr != (vaddr + PAGE_SIZE)) {
884 			/*
885 			 * At this point we have to loop over the set of
886 			 * physically contiguous pages and break them down it
887 			 * sizes supported by the HW.
888 			 * There are two main constraints:
889 			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
890 			 *        If the total set size is bigger than that
891 			 *        program only a MAX_EXPECTED_BUFFER chunk.
892 			 *     2. The buffer size has to be a power of two. If
893 			 *        it is not, round down to the closes power of
894 			 *        2 and program that size.
895 			 */
896 			while (pagecount) {
897 				int maxpages = pagecount;
898 				u32 bufsize = pagecount * PAGE_SIZE;
899 
900 				if (bufsize > MAX_EXPECTED_BUFFER)
901 					maxpages =
902 						MAX_EXPECTED_BUFFER >>
903 						PAGE_SHIFT;
904 				else if (!is_power_of_2(bufsize))
905 					maxpages =
906 						rounddown_pow_of_two(bufsize) >>
907 						PAGE_SHIFT;
908 
909 				list[setcount].idx = pageidx;
910 				list[setcount].count = maxpages;
911 				trace_hfi1_tid_pageset(flow->req->qp, setcount,
912 						       list[setcount].idx,
913 						       list[setcount].count);
914 				pagecount -= maxpages;
915 				pageidx += maxpages;
916 				setcount++;
917 			}
918 			pageidx = i;
919 			pagecount = 1;
920 			vaddr = this_vaddr;
921 		} else {
922 			vaddr += PAGE_SIZE;
923 			pagecount++;
924 		}
925 	}
926 	/* insure we always return an even number of sets */
927 	if (setcount & 1)
928 		list[setcount++].count = 0;
929 	return setcount;
930 }
931 
932 /**
933  * tid_flush_pages - dump out pages into pagesets
934  * @list - list of pagesets
935  * @idx - pointer to current page index
936  * @pages - number of pages to dump
937  * @sets - current number of pagesset
938  *
939  * This routine flushes out accumuated pages.
940  *
941  * To insure an even number of sets the
942  * code may add a filler.
943  *
944  * This can happen with when pages is not
945  * a power of 2 or pages is a power of 2
946  * less than the maximum pages.
947  *
948  * Return:
949  * The new number of sets
950  */
951 
952 static u32 tid_flush_pages(struct tid_rdma_pageset *list,
953 			   u32 *idx, u32 pages, u32 sets)
954 {
955 	while (pages) {
956 		u32 maxpages = pages;
957 
958 		if (maxpages > MAX_EXPECTED_PAGES)
959 			maxpages = MAX_EXPECTED_PAGES;
960 		else if (!is_power_of_2(maxpages))
961 			maxpages = rounddown_pow_of_two(maxpages);
962 		list[sets].idx = *idx;
963 		list[sets++].count = maxpages;
964 		*idx += maxpages;
965 		pages -= maxpages;
966 	}
967 	/* might need a filler */
968 	if (sets & 1)
969 		list[sets++].count = 0;
970 	return sets;
971 }
972 
973 /**
974  * tid_rdma_find_phys_blocks_8k - get groups base on mr info
975  * @pages - pointer to an array of page structs
976  * @npages - number of pages
977  * @list - page set array to return
978  *
979  * This routine parses an array of pages to compute pagesets
980  * in an 8k compatible way.
981  *
982  * pages are tested two at a time, i, i + 1 for contiguous
983  * pages and i - 1 and i contiguous pages.
984  *
985  * If any condition is false, any accumlated pages are flushed and
986  * v0,v1 are emitted as separate PAGE_SIZE pagesets
987  *
988  * Otherwise, the current 8k is totaled for a future flush.
989  *
990  * Return:
991  * The number of pagesets
992  * list set with the returned number of pagesets
993  *
994  */
995 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
996 					struct page **pages,
997 					u32 npages,
998 					struct tid_rdma_pageset *list)
999 {
1000 	u32 idx, sets = 0, i;
1001 	u32 pagecnt = 0;
1002 	void *v0, *v1, *vm1;
1003 
1004 	if (!npages)
1005 		return 0;
1006 	for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
1007 		/* get a new v0 */
1008 		v0 = page_address(pages[i]);
1009 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1010 		v1 = i + 1 < npages ?
1011 				page_address(pages[i + 1]) : NULL;
1012 		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1013 		/* compare i, i + 1 vaddr */
1014 		if (v1 != (v0 + PAGE_SIZE)) {
1015 			/* flush out pages */
1016 			sets = tid_flush_pages(list, &idx, pagecnt, sets);
1017 			/* output v0,v1 as two pagesets */
1018 			list[sets].idx = idx++;
1019 			list[sets++].count = 1;
1020 			if (v1) {
1021 				list[sets].count = 1;
1022 				list[sets++].idx = idx++;
1023 			} else {
1024 				list[sets++].count = 0;
1025 			}
1026 			vm1 = NULL;
1027 			pagecnt = 0;
1028 			continue;
1029 		}
1030 		/* i,i+1 consecutive, look at i-1,i */
1031 		if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
1032 			/* flush out pages */
1033 			sets = tid_flush_pages(list, &idx, pagecnt, sets);
1034 			pagecnt = 0;
1035 		}
1036 		/* pages will always be a multiple of 8k */
1037 		pagecnt += 2;
1038 		/* save i-1 */
1039 		vm1 = v1;
1040 		/* move to next pair */
1041 	}
1042 	/* dump residual pages at end */
1043 	sets = tid_flush_pages(list, &idx, npages - idx, sets);
1044 	/* by design cannot be odd sets */
1045 	WARN_ON(sets & 1);
1046 	return sets;
1047 }
1048 
1049 /**
1050  * Find pages for one segment of a sge array represented by @ss. The function
1051  * does not check the sge, the sge must have been checked for alignment with a
1052  * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
1053  * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
1054  * copy maintained in @ss->sge, the original sge is not modified.
1055  *
1056  * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
1057  * releasing the MR reference count at the same time. Otherwise, we'll "leak"
1058  * references to the MR. This difference requires that we keep track of progress
1059  * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
1060  * structure.
1061  */
1062 static u32 kern_find_pages(struct tid_rdma_flow *flow,
1063 			   struct page **pages,
1064 			   struct rvt_sge_state *ss, bool *last)
1065 {
1066 	struct tid_rdma_request *req = flow->req;
1067 	struct rvt_sge *sge = &ss->sge;
1068 	u32 length = flow->req->seg_len;
1069 	u32 len = PAGE_SIZE;
1070 	u32 i = 0;
1071 
1072 	while (length && req->isge < ss->num_sge) {
1073 		pages[i++] = virt_to_page(sge->vaddr);
1074 
1075 		sge->vaddr += len;
1076 		sge->length -= len;
1077 		sge->sge_length -= len;
1078 		if (!sge->sge_length) {
1079 			if (++req->isge < ss->num_sge)
1080 				*sge = ss->sg_list[req->isge - 1];
1081 		} else if (sge->length == 0 && sge->mr->lkey) {
1082 			if (++sge->n >= RVT_SEGSZ) {
1083 				++sge->m;
1084 				sge->n = 0;
1085 			}
1086 			sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
1087 			sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
1088 		}
1089 		length -= len;
1090 	}
1091 
1092 	flow->length = flow->req->seg_len - length;
1093 	*last = req->isge == ss->num_sge ? false : true;
1094 	return i;
1095 }
1096 
1097 static void dma_unmap_flow(struct tid_rdma_flow *flow)
1098 {
1099 	struct hfi1_devdata *dd;
1100 	int i;
1101 	struct tid_rdma_pageset *pset;
1102 
1103 	dd = flow->req->rcd->dd;
1104 	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1105 			i++, pset++) {
1106 		if (pset->count && pset->addr) {
1107 			dma_unmap_page(&dd->pcidev->dev,
1108 				       pset->addr,
1109 				       PAGE_SIZE * pset->count,
1110 				       DMA_FROM_DEVICE);
1111 			pset->mapped = 0;
1112 		}
1113 	}
1114 }
1115 
1116 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
1117 {
1118 	int i;
1119 	struct hfi1_devdata *dd = flow->req->rcd->dd;
1120 	struct tid_rdma_pageset *pset;
1121 
1122 	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1123 			i++, pset++) {
1124 		if (pset->count) {
1125 			pset->addr = dma_map_page(&dd->pcidev->dev,
1126 						  pages[pset->idx],
1127 						  0,
1128 						  PAGE_SIZE * pset->count,
1129 						  DMA_FROM_DEVICE);
1130 
1131 			if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
1132 				dma_unmap_flow(flow);
1133 				return -ENOMEM;
1134 			}
1135 			pset->mapped = 1;
1136 		}
1137 	}
1138 	return 0;
1139 }
1140 
1141 static inline bool dma_mapped(struct tid_rdma_flow *flow)
1142 {
1143 	return !!flow->pagesets[0].mapped;
1144 }
1145 
1146 /*
1147  * Get pages pointers and identify contiguous physical memory chunks for a
1148  * segment. All segments are of length flow->req->seg_len.
1149  */
1150 static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
1151 				struct page **pages,
1152 				struct rvt_sge_state *ss, bool *last)
1153 {
1154 	u8 npages;
1155 
1156 	/* Reuse previously computed pagesets, if any */
1157 	if (flow->npagesets) {
1158 		trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1159 					  flow);
1160 		if (!dma_mapped(flow))
1161 			return dma_map_flow(flow, pages);
1162 		return 0;
1163 	}
1164 
1165 	npages = kern_find_pages(flow, pages, ss, last);
1166 
1167 	if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1168 		flow->npagesets =
1169 			tid_rdma_find_phys_blocks_4k(flow, pages, npages,
1170 						     flow->pagesets);
1171 	else
1172 		flow->npagesets =
1173 			tid_rdma_find_phys_blocks_8k(flow, pages, npages,
1174 						     flow->pagesets);
1175 
1176 	return dma_map_flow(flow, pages);
1177 }
1178 
1179 static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
1180 				     struct hfi1_ctxtdata *rcd, char *s,
1181 				     struct tid_group *grp, u8 cnt)
1182 {
1183 	struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
1184 
1185 	WARN_ON_ONCE(flow->tnode_cnt >=
1186 		     (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
1187 	if (WARN_ON_ONCE(cnt & 1))
1188 		dd_dev_err(rcd->dd,
1189 			   "unexpected odd allocation cnt %u map 0x%x used %u",
1190 			   cnt, grp->map, grp->used);
1191 
1192 	node->grp = grp;
1193 	node->map = grp->map;
1194 	node->cnt = cnt;
1195 	trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1196 				grp->base, grp->map, grp->used, cnt);
1197 }
1198 
1199 /*
1200  * Try to allocate pageset_count TID's from TID groups for a context
1201  *
1202  * This function allocates TID's without moving groups between lists or
1203  * modifying grp->map. This is done as follows, being cogizant of the lists
1204  * between which the TID groups will move:
1205  * 1. First allocate complete groups of 8 TID's since this is more efficient,
1206  *    these groups will move from group->full without affecting used
1207  * 2. If more TID's are needed allocate from used (will move from used->full or
1208  *    stay in used)
1209  * 3. If we still don't have the required number of TID's go back and look again
1210  *    at a complete group (will move from group->used)
1211  */
1212 static int kern_alloc_tids(struct tid_rdma_flow *flow)
1213 {
1214 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
1215 	struct hfi1_devdata *dd = rcd->dd;
1216 	u32 ngroups, pageidx = 0;
1217 	struct tid_group *group = NULL, *used;
1218 	u8 use;
1219 
1220 	flow->tnode_cnt = 0;
1221 	ngroups = flow->npagesets / dd->rcv_entries.group_size;
1222 	if (!ngroups)
1223 		goto used_list;
1224 
1225 	/* First look at complete groups */
1226 	list_for_each_entry(group,  &rcd->tid_group_list.list, list) {
1227 		kern_add_tid_node(flow, rcd, "complete groups", group,
1228 				  group->size);
1229 
1230 		pageidx += group->size;
1231 		if (!--ngroups)
1232 			break;
1233 	}
1234 
1235 	if (pageidx >= flow->npagesets)
1236 		goto ok;
1237 
1238 used_list:
1239 	/* Now look at partially used groups */
1240 	list_for_each_entry(used, &rcd->tid_used_list.list, list) {
1241 		use = min_t(u32, flow->npagesets - pageidx,
1242 			    used->size - used->used);
1243 		kern_add_tid_node(flow, rcd, "used groups", used, use);
1244 
1245 		pageidx += use;
1246 		if (pageidx >= flow->npagesets)
1247 			goto ok;
1248 	}
1249 
1250 	/*
1251 	 * Look again at a complete group, continuing from where we left.
1252 	 * However, if we are at the head, we have reached the end of the
1253 	 * complete groups list from the first loop above
1254 	 */
1255 	if (group && &group->list == &rcd->tid_group_list.list)
1256 		goto bail_eagain;
1257 	group = list_prepare_entry(group, &rcd->tid_group_list.list,
1258 				   list);
1259 	if (list_is_last(&group->list, &rcd->tid_group_list.list))
1260 		goto bail_eagain;
1261 	group = list_next_entry(group, list);
1262 	use = min_t(u32, flow->npagesets - pageidx, group->size);
1263 	kern_add_tid_node(flow, rcd, "complete continue", group, use);
1264 	pageidx += use;
1265 	if (pageidx >= flow->npagesets)
1266 		goto ok;
1267 bail_eagain:
1268 	trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1269 				  (u64)flow->npagesets);
1270 	return -EAGAIN;
1271 ok:
1272 	return 0;
1273 }
1274 
1275 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
1276 				   u32 *pset_idx)
1277 {
1278 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
1279 	struct hfi1_devdata *dd = rcd->dd;
1280 	struct kern_tid_node *node = &flow->tnode[grp_num];
1281 	struct tid_group *grp = node->grp;
1282 	struct tid_rdma_pageset *pset;
1283 	u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1284 	u32 rcventry, npages = 0, pair = 0, tidctrl;
1285 	u8 i, cnt = 0;
1286 
1287 	for (i = 0; i < grp->size; i++) {
1288 		rcventry = grp->base + i;
1289 
1290 		if (node->map & BIT(i) || cnt >= node->cnt) {
1291 			rcv_array_wc_fill(dd, rcventry);
1292 			continue;
1293 		}
1294 		pset = &flow->pagesets[(*pset_idx)++];
1295 		if (pset->count) {
1296 			hfi1_put_tid(dd, rcventry, PT_EXPECTED,
1297 				     pset->addr, trdma_pset_order(pset));
1298 		} else {
1299 			hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
1300 		}
1301 		npages += pset->count;
1302 
1303 		rcventry -= rcd->expected_base;
1304 		tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
1305 		/*
1306 		 * A single TID entry will be used to use a rcvarr pair (with
1307 		 * tidctrl 0x3), if ALL these are true (a) the bit pos is even
1308 		 * (b) the group map shows current and the next bits as free
1309 		 * indicating two consecutive rcvarry entries are available (c)
1310 		 * we actually need 2 more entries
1311 		 */
1312 		pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
1313 			node->cnt >= cnt + 2;
1314 		if (!pair) {
1315 			if (!pset->count)
1316 				tidctrl = 0x1;
1317 			flow->tid_entry[flow->tidcnt++] =
1318 				EXP_TID_SET(IDX, rcventry >> 1) |
1319 				EXP_TID_SET(CTRL, tidctrl) |
1320 				EXP_TID_SET(LEN, npages);
1321 			trace_hfi1_tid_entry_alloc(/* entry */
1322 			   flow->req->qp, flow->tidcnt - 1,
1323 			   flow->tid_entry[flow->tidcnt - 1]);
1324 
1325 			/* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
1326 			flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
1327 			npages = 0;
1328 		}
1329 
1330 		if (grp->used == grp->size - 1)
1331 			tid_group_move(grp, &rcd->tid_used_list,
1332 				       &rcd->tid_full_list);
1333 		else if (!grp->used)
1334 			tid_group_move(grp, &rcd->tid_group_list,
1335 				       &rcd->tid_used_list);
1336 
1337 		grp->used++;
1338 		grp->map |= BIT(i);
1339 		cnt++;
1340 	}
1341 }
1342 
1343 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
1344 {
1345 	struct hfi1_ctxtdata *rcd = flow->req->rcd;
1346 	struct hfi1_devdata *dd = rcd->dd;
1347 	struct kern_tid_node *node = &flow->tnode[grp_num];
1348 	struct tid_group *grp = node->grp;
1349 	u32 rcventry;
1350 	u8 i, cnt = 0;
1351 
1352 	for (i = 0; i < grp->size; i++) {
1353 		rcventry = grp->base + i;
1354 
1355 		if (node->map & BIT(i) || cnt >= node->cnt) {
1356 			rcv_array_wc_fill(dd, rcventry);
1357 			continue;
1358 		}
1359 
1360 		hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
1361 
1362 		grp->used--;
1363 		grp->map &= ~BIT(i);
1364 		cnt++;
1365 
1366 		if (grp->used == grp->size - 1)
1367 			tid_group_move(grp, &rcd->tid_full_list,
1368 				       &rcd->tid_used_list);
1369 		else if (!grp->used)
1370 			tid_group_move(grp, &rcd->tid_used_list,
1371 				       &rcd->tid_group_list);
1372 	}
1373 	if (WARN_ON_ONCE(cnt & 1)) {
1374 		struct hfi1_ctxtdata *rcd = flow->req->rcd;
1375 		struct hfi1_devdata *dd = rcd->dd;
1376 
1377 		dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
1378 			   cnt, grp->map, grp->used);
1379 	}
1380 }
1381 
1382 static void kern_program_rcvarray(struct tid_rdma_flow *flow)
1383 {
1384 	u32 pset_idx = 0;
1385 	int i;
1386 
1387 	flow->npkts = 0;
1388 	flow->tidcnt = 0;
1389 	for (i = 0; i < flow->tnode_cnt; i++)
1390 		kern_program_rcv_group(flow, i, &pset_idx);
1391 	trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1392 }
1393 
1394 /**
1395  * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
1396  * TID RDMA request
1397  *
1398  * @req: TID RDMA request for which the segment/flow is being set up
1399  * @ss: sge state, maintains state across successive segments of a sge
1400  * @last: set to true after the last sge segment has been processed
1401  *
1402  * This function
1403  * (1) finds a free flow entry in the flow circular buffer
1404  * (2) finds pages and continuous physical chunks constituing one segment
1405  *     of an sge
1406  * (3) allocates TID group entries for those chunks
1407  * (4) programs rcvarray entries in the hardware corresponding to those
1408  *     TID's
1409  * (5) computes a tidarray with formatted TID entries which can be sent
1410  *     to the sender
1411  * (6) Reserves and programs HW flows.
1412  * (7) It also manages queing the QP when TID/flow resources are not
1413  *     available.
1414  *
1415  * @req points to struct tid_rdma_request of which the segments are a part. The
1416  * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1417  * req->flow_idx is the index of the flow which has been prepared in this
1418  * invocation of function call. With flow = &req->flows[req->flow_idx],
1419  * flow->tid_entry contains the TID array which the sender can use for TID RDMA
1420  * sends and flow->npkts contains number of packets required to send the
1421  * segment.
1422  *
1423  * hfi1_check_sge_align should be called prior to calling this function and if
1424  * it signals error TID RDMA cannot be used for this sge and this function
1425  * should not be called.
1426  *
1427  * For the queuing, caller must hold the flow->req->qp s_lock from the send
1428  * engine and the function will procure the exp_lock.
1429  *
1430  * Return:
1431  * The function returns -EAGAIN if sufficient number of TID/flow resources to
1432  * map the segment could not be allocated. In this case the function should be
1433  * called again with previous arguments to retry the TID allocation. There are
1434  * no other error returns. The function returns 0 on success.
1435  */
1436 int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
1437 			    struct rvt_sge_state *ss, bool *last)
1438 	__must_hold(&req->qp->s_lock)
1439 {
1440 	struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1441 	struct hfi1_ctxtdata *rcd = req->rcd;
1442 	struct hfi1_qp_priv *qpriv = req->qp->priv;
1443 	unsigned long flags;
1444 	struct rvt_qp *fqp;
1445 	u16 clear_tail = req->clear_tail;
1446 
1447 	lockdep_assert_held(&req->qp->s_lock);
1448 	/*
1449 	 * We return error if either (a) we don't have space in the flow
1450 	 * circular buffer, or (b) we already have max entries in the buffer.
1451 	 * Max entries depend on the type of request we are processing and the
1452 	 * negotiated TID RDMA parameters.
1453 	 */
1454 	if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
1455 	    CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
1456 	    req->n_flows)
1457 		return -EINVAL;
1458 
1459 	/*
1460 	 * Get pages, identify contiguous physical memory chunks for the segment
1461 	 * If we can not determine a DMA address mapping we will treat it just
1462 	 * like if we ran out of space above.
1463 	 */
1464 	if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
1465 		hfi1_wait_kmem(flow->req->qp);
1466 		return -ENOMEM;
1467 	}
1468 
1469 	spin_lock_irqsave(&rcd->exp_lock, flags);
1470 	if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1471 		goto queue;
1472 
1473 	/*
1474 	 * At this point we know the number of pagesets and hence the number of
1475 	 * TID's to map the segment. Allocate the TID's from the TID groups. If
1476 	 * we cannot allocate the required number we exit and try again later
1477 	 */
1478 	if (kern_alloc_tids(flow))
1479 		goto queue;
1480 	/*
1481 	 * Finally program the TID entries with the pagesets, compute the
1482 	 * tidarray and enable the HW flow
1483 	 */
1484 	kern_program_rcvarray(flow);
1485 
1486 	/*
1487 	 * Setup the flow state with relevant information.
1488 	 * This information is used for tracking the sequence of data packets
1489 	 * for the segment.
1490 	 * The flow is setup here as this is the most accurate time and place
1491 	 * to do so. Doing at a later time runs the risk of the flow data in
1492 	 * qpriv getting out of sync.
1493 	 */
1494 	memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
1495 	flow->idx = qpriv->flow_state.index;
1496 	flow->flow_state.generation = qpriv->flow_state.generation;
1497 	flow->flow_state.spsn = qpriv->flow_state.psn;
1498 	flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
1499 	flow->flow_state.r_next_psn =
1500 		full_flow_psn(flow, flow->flow_state.spsn);
1501 	qpriv->flow_state.psn += flow->npkts;
1502 
1503 	dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1504 	/* get head before dropping lock */
1505 	fqp = first_qp(rcd, &rcd->rarr_queue);
1506 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
1507 	tid_rdma_schedule_tid_wakeup(fqp);
1508 
1509 	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1510 	return 0;
1511 queue:
1512 	queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1513 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
1514 	return -EAGAIN;
1515 }
1516 
1517 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
1518 {
1519 	flow->npagesets = 0;
1520 }
1521 
1522 /*
1523  * This function is called after one segment has been successfully sent to
1524  * release the flow and TID HW/SW resources for that segment. The segments for a
1525  * TID RDMA request are setup and cleared in FIFO order which is managed using a
1526  * circular buffer.
1527  */
1528 int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
1529 	__must_hold(&req->qp->s_lock)
1530 {
1531 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1532 	struct hfi1_ctxtdata *rcd = req->rcd;
1533 	unsigned long flags;
1534 	int i;
1535 	struct rvt_qp *fqp;
1536 
1537 	lockdep_assert_held(&req->qp->s_lock);
1538 	/* Exit if we have nothing in the flow circular buffer */
1539 	if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
1540 		return -EINVAL;
1541 
1542 	spin_lock_irqsave(&rcd->exp_lock, flags);
1543 
1544 	for (i = 0; i < flow->tnode_cnt; i++)
1545 		kern_unprogram_rcv_group(flow, i);
1546 	/* To prevent double unprogramming */
1547 	flow->tnode_cnt = 0;
1548 	/* get head before dropping lock */
1549 	fqp = first_qp(rcd, &rcd->rarr_queue);
1550 	spin_unlock_irqrestore(&rcd->exp_lock, flags);
1551 
1552 	dma_unmap_flow(flow);
1553 
1554 	hfi1_tid_rdma_reset_flow(flow);
1555 	req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
1556 
1557 	if (fqp == req->qp) {
1558 		__trigger_tid_waiter(fqp);
1559 		rvt_put_qp(fqp);
1560 	} else {
1561 		tid_rdma_schedule_tid_wakeup(fqp);
1562 	}
1563 
1564 	return 0;
1565 }
1566 
1567 /*
1568  * This function is called to release all the tid entries for
1569  * a request.
1570  */
1571 void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
1572 	__must_hold(&req->qp->s_lock)
1573 {
1574 	/* Use memory barrier for proper ordering */
1575 	while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
1576 		if (hfi1_kern_exp_rcv_clear(req))
1577 			break;
1578 	}
1579 }
1580 
1581 /**
1582  * hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information
1583  * @req - the tid rdma request to be cleaned
1584  */
1585 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
1586 {
1587 	kfree(req->flows);
1588 	req->flows = NULL;
1589 }
1590 
1591 /**
1592  * __trdma_clean_swqe - clean up for large sized QPs
1593  * @qp: the queue patch
1594  * @wqe: the send wqe
1595  */
1596 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
1597 {
1598 	struct hfi1_swqe_priv *p = wqe->priv;
1599 
1600 	hfi1_kern_exp_rcv_free_flows(&p->tid_req);
1601 }
1602 
1603 /*
1604  * This can be called at QP create time or in the data path.
1605  */
1606 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
1607 					 gfp_t gfp)
1608 {
1609 	struct tid_rdma_flow *flows;
1610 	int i;
1611 
1612 	if (likely(req->flows))
1613 		return 0;
1614 	flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
1615 			     req->rcd->numa_id);
1616 	if (!flows)
1617 		return -ENOMEM;
1618 	/* mini init */
1619 	for (i = 0; i < MAX_FLOWS; i++) {
1620 		flows[i].req = req;
1621 		flows[i].npagesets = 0;
1622 		flows[i].pagesets[0].mapped =  0;
1623 		flows[i].resync_npkts = 0;
1624 	}
1625 	req->flows = flows;
1626 	return 0;
1627 }
1628 
1629 static void hfi1_init_trdma_req(struct rvt_qp *qp,
1630 				struct tid_rdma_request *req)
1631 {
1632 	struct hfi1_qp_priv *qpriv = qp->priv;
1633 
1634 	/*
1635 	 * Initialize various TID RDMA request variables.
1636 	 * These variables are "static", which is why they
1637 	 * can be pre-initialized here before the WRs has
1638 	 * even been submitted.
1639 	 * However, non-NULL values for these variables do not
1640 	 * imply that this WQE has been enabled for TID RDMA.
1641 	 * Drivers should check the WQE's opcode to determine
1642 	 * if a request is a TID RDMA one or not.
1643 	 */
1644 	req->qp = qp;
1645 	req->rcd = qpriv->rcd;
1646 }
1647 
1648 u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
1649 			    void *context, int vl, int mode, u64 data)
1650 {
1651 	struct hfi1_devdata *dd = context;
1652 
1653 	return dd->verbs_dev.n_tidwait;
1654 }
1655 
1656 static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
1657 					  u32 psn, u16 *fidx)
1658 {
1659 	u16 head, tail;
1660 	struct tid_rdma_flow *flow;
1661 
1662 	head = req->setup_head;
1663 	tail = req->clear_tail;
1664 	for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
1665 	     tail = CIRC_NEXT(tail, MAX_FLOWS)) {
1666 		flow = &req->flows[tail];
1667 		if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
1668 		    cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
1669 			if (fidx)
1670 				*fidx = tail;
1671 			return flow;
1672 		}
1673 	}
1674 	return NULL;
1675 }
1676 
1677 /* TID RDMA READ functions */
1678 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
1679 				    struct ib_other_headers *ohdr, u32 *bth1,
1680 				    u32 *bth2, u32 *len)
1681 {
1682 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1683 	struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1684 	struct rvt_qp *qp = req->qp;
1685 	struct hfi1_qp_priv *qpriv = qp->priv;
1686 	struct hfi1_swqe_priv *wpriv = wqe->priv;
1687 	struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
1688 	struct tid_rdma_params *remote;
1689 	u32 req_len = 0;
1690 	void *req_addr = NULL;
1691 
1692 	/* This is the IB psn used to send the request */
1693 	*bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
1694 	trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1695 
1696 	/* TID Entries for TID RDMA READ payload */
1697 	req_addr = &flow->tid_entry[flow->tid_idx];
1698 	req_len = sizeof(*flow->tid_entry) *
1699 			(flow->tidcnt - flow->tid_idx);
1700 
1701 	memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
1702 	wpriv->ss.sge.vaddr = req_addr;
1703 	wpriv->ss.sge.sge_length = req_len;
1704 	wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
1705 	/*
1706 	 * We can safely zero these out. Since the first SGE covers the
1707 	 * entire packet, nothing else should even look at the MR.
1708 	 */
1709 	wpriv->ss.sge.mr = NULL;
1710 	wpriv->ss.sge.m = 0;
1711 	wpriv->ss.sge.n = 0;
1712 
1713 	wpriv->ss.sg_list = NULL;
1714 	wpriv->ss.total_len = wpriv->ss.sge.sge_length;
1715 	wpriv->ss.num_sge = 1;
1716 
1717 	/* Construct the TID RDMA READ REQ packet header */
1718 	rcu_read_lock();
1719 	remote = rcu_dereference(qpriv->tid_rdma.remote);
1720 
1721 	KDETH_RESET(rreq->kdeth0, KVER, 0x1);
1722 	KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
1723 	rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
1724 			   req->cur_seg * req->seg_len + flow->sent);
1725 	rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
1726 	rreq->reth.length = cpu_to_be32(*len);
1727 	rreq->tid_flow_psn =
1728 		cpu_to_be32((flow->flow_state.generation <<
1729 			     HFI1_KDETH_BTH_SEQ_SHIFT) |
1730 			    ((flow->flow_state.spsn + flow->pkt) &
1731 			     HFI1_KDETH_BTH_SEQ_MASK));
1732 	rreq->tid_flow_qp =
1733 		cpu_to_be32(qpriv->tid_rdma.local.qp |
1734 			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
1735 			     TID_RDMA_DESTQP_FLOW_SHIFT) |
1736 			    qpriv->rcd->ctxt);
1737 	rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
1738 	*bth1 &= ~RVT_QPN_MASK;
1739 	*bth1 |= remote->qp;
1740 	*bth2 |= IB_BTH_REQ_ACK;
1741 	rcu_read_unlock();
1742 
1743 	/* We are done with this segment */
1744 	flow->sent += *len;
1745 	req->cur_seg++;
1746 	qp->s_state = TID_OP(READ_REQ);
1747 	req->ack_pending++;
1748 	req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
1749 	qpriv->pending_tid_r_segs++;
1750 	qp->s_num_rd_atomic++;
1751 
1752 	/* Set the TID RDMA READ request payload size */
1753 	*len = req_len;
1754 
1755 	return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
1756 }
1757 
1758 /*
1759  * @len: contains the data length to read upon entry and the read request
1760  *       payload length upon exit.
1761  */
1762 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
1763 				 struct ib_other_headers *ohdr, u32 *bth1,
1764 				 u32 *bth2, u32 *len)
1765 	__must_hold(&qp->s_lock)
1766 {
1767 	struct hfi1_qp_priv *qpriv = qp->priv;
1768 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1769 	struct tid_rdma_flow *flow = NULL;
1770 	u32 hdwords = 0;
1771 	bool last;
1772 	bool retry = true;
1773 	u32 npkts = rvt_div_round_up_mtu(qp, *len);
1774 
1775 	trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
1776 					  wqe->lpsn, req);
1777 	/*
1778 	 * Check sync conditions. Make sure that there are no pending
1779 	 * segments before freeing the flow.
1780 	 */
1781 sync_check:
1782 	if (req->state == TID_REQUEST_SYNC) {
1783 		if (qpriv->pending_tid_r_segs)
1784 			goto done;
1785 
1786 		hfi1_kern_clear_hw_flow(req->rcd, qp);
1787 		qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
1788 		req->state = TID_REQUEST_ACTIVE;
1789 	}
1790 
1791 	/*
1792 	 * If the request for this segment is resent, the tid resources should
1793 	 * have been allocated before. In this case, req->flow_idx should
1794 	 * fall behind req->setup_head.
1795 	 */
1796 	if (req->flow_idx == req->setup_head) {
1797 		retry = false;
1798 		if (req->state == TID_REQUEST_RESEND) {
1799 			/*
1800 			 * This is the first new segment for a request whose
1801 			 * earlier segments have been re-sent. We need to
1802 			 * set up the sge pointer correctly.
1803 			 */
1804 			restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1805 				    qp->pmtu);
1806 			req->isge = 0;
1807 			req->state = TID_REQUEST_ACTIVE;
1808 		}
1809 
1810 		/*
1811 		 * Check sync. The last PSN of each generation is reserved for
1812 		 * RESYNC.
1813 		 */
1814 		if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
1815 			req->state = TID_REQUEST_SYNC;
1816 			goto sync_check;
1817 		}
1818 
1819 		/* Allocate the flow if not yet */
1820 		if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
1821 			goto done;
1822 
1823 		/*
1824 		 * The following call will advance req->setup_head after
1825 		 * allocating the tid entries.
1826 		 */
1827 		if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
1828 			req->state = TID_REQUEST_QUEUED;
1829 
1830 			/*
1831 			 * We don't have resources for this segment. The QP has
1832 			 * already been queued.
1833 			 */
1834 			goto done;
1835 		}
1836 	}
1837 
1838 	/* req->flow_idx should only be one slot behind req->setup_head */
1839 	flow = &req->flows[req->flow_idx];
1840 	flow->pkt = 0;
1841 	flow->tid_idx = 0;
1842 	flow->sent = 0;
1843 	if (!retry) {
1844 		/* Set the first and last IB PSN for the flow in use.*/
1845 		flow->flow_state.ib_spsn = req->s_next_psn;
1846 		flow->flow_state.ib_lpsn =
1847 			flow->flow_state.ib_spsn + flow->npkts - 1;
1848 	}
1849 
1850 	/* Calculate the next segment start psn.*/
1851 	req->s_next_psn += flow->npkts;
1852 
1853 	/* Build the packet header */
1854 	hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
1855 done:
1856 	return hdwords;
1857 }
1858 
1859 /*
1860  * Validate and accept the TID RDMA READ request parameters.
1861  * Return 0 if the request is accepted successfully;
1862  * Return 1 otherwise.
1863  */
1864 static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
1865 				     struct rvt_ack_entry *e,
1866 				     struct hfi1_packet *packet,
1867 				     struct ib_other_headers *ohdr,
1868 				     u32 bth0, u32 psn, u64 vaddr, u32 len)
1869 {
1870 	struct hfi1_qp_priv *qpriv = qp->priv;
1871 	struct tid_rdma_request *req;
1872 	struct tid_rdma_flow *flow;
1873 	u32 flow_psn, i, tidlen = 0, pktlen, tlen;
1874 
1875 	req = ack_to_tid_req(e);
1876 
1877 	/* Validate the payload first */
1878 	flow = &req->flows[req->setup_head];
1879 
1880 	/* payload length = packet length - (header length + ICRC length) */
1881 	pktlen = packet->tlen - (packet->hlen + 4);
1882 	if (pktlen > sizeof(flow->tid_entry))
1883 		return 1;
1884 	memcpy(flow->tid_entry, packet->ebuf, pktlen);
1885 	flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
1886 
1887 	/*
1888 	 * Walk the TID_ENTRY list to make sure we have enough space for a
1889 	 * complete segment. Also calculate the number of required packets.
1890 	 */
1891 	flow->npkts = rvt_div_round_up_mtu(qp, len);
1892 	for (i = 0; i < flow->tidcnt; i++) {
1893 		trace_hfi1_tid_entry_rcv_read_req(qp, i,
1894 						  flow->tid_entry[i]);
1895 		tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
1896 		if (!tlen)
1897 			return 1;
1898 
1899 		/*
1900 		 * For tid pair (tidctr == 3), the buffer size of the pair
1901 		 * should be the sum of the buffer size described by each
1902 		 * tid entry. However, only the first entry needs to be
1903 		 * specified in the request (see WFR HAS Section 8.5.7.1).
1904 		 */
1905 		tidlen += tlen;
1906 	}
1907 	if (tidlen * PAGE_SIZE < len)
1908 		return 1;
1909 
1910 	/* Empty the flow array */
1911 	req->clear_tail = req->setup_head;
1912 	flow->pkt = 0;
1913 	flow->tid_idx = 0;
1914 	flow->tid_offset = 0;
1915 	flow->sent = 0;
1916 	flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
1917 	flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
1918 		    TID_RDMA_DESTQP_FLOW_MASK;
1919 	flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
1920 	flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
1921 	flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
1922 	flow->length = len;
1923 
1924 	flow->flow_state.lpsn = flow->flow_state.spsn +
1925 		flow->npkts - 1;
1926 	flow->flow_state.ib_spsn = psn;
1927 	flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
1928 
1929 	trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1930 	/* Set the initial flow index to the current flow. */
1931 	req->flow_idx = req->setup_head;
1932 
1933 	/* advance circular buffer head */
1934 	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1935 
1936 	/*
1937 	 * Compute last PSN for request.
1938 	 */
1939 	e->opcode = (bth0 >> 24) & 0xff;
1940 	e->psn = psn;
1941 	e->lpsn = psn + flow->npkts - 1;
1942 	e->sent = 0;
1943 
1944 	req->n_flows = qpriv->tid_rdma.local.max_read;
1945 	req->state = TID_REQUEST_ACTIVE;
1946 	req->cur_seg = 0;
1947 	req->comp_seg = 0;
1948 	req->ack_seg = 0;
1949 	req->isge = 0;
1950 	req->seg_len = qpriv->tid_rdma.local.max_len;
1951 	req->total_len = len;
1952 	req->total_segs = 1;
1953 	req->r_flow_psn = e->psn;
1954 
1955 	trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
1956 					req);
1957 	return 0;
1958 }
1959 
1960 static int tid_rdma_rcv_error(struct hfi1_packet *packet,
1961 			      struct ib_other_headers *ohdr,
1962 			      struct rvt_qp *qp, u32 psn, int diff)
1963 {
1964 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1965 	struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
1966 	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
1967 	struct hfi1_qp_priv *qpriv = qp->priv;
1968 	struct rvt_ack_entry *e;
1969 	struct tid_rdma_request *req;
1970 	unsigned long flags;
1971 	u8 prev;
1972 	bool old_req;
1973 
1974 	trace_hfi1_rsp_tid_rcv_error(qp, psn);
1975 	trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
1976 	if (diff > 0) {
1977 		/* sequence error */
1978 		if (!qp->r_nak_state) {
1979 			ibp->rvp.n_rc_seqnak++;
1980 			qp->r_nak_state = IB_NAK_PSN_ERROR;
1981 			qp->r_ack_psn = qp->r_psn;
1982 			rc_defered_ack(rcd, qp);
1983 		}
1984 		goto done;
1985 	}
1986 
1987 	ibp->rvp.n_rc_dupreq++;
1988 
1989 	spin_lock_irqsave(&qp->s_lock, flags);
1990 	e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
1991 	if (!e || (e->opcode != TID_OP(READ_REQ) &&
1992 		   e->opcode != TID_OP(WRITE_REQ)))
1993 		goto unlock;
1994 
1995 	req = ack_to_tid_req(e);
1996 	req->r_flow_psn = psn;
1997 	trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
1998 	if (e->opcode == TID_OP(READ_REQ)) {
1999 		struct ib_reth *reth;
2000 		u32 len;
2001 		u32 rkey;
2002 		u64 vaddr;
2003 		int ok;
2004 		u32 bth0;
2005 
2006 		reth = &ohdr->u.tid_rdma.r_req.reth;
2007 		/*
2008 		 * The requester always restarts from the start of the original
2009 		 * request.
2010 		 */
2011 		len = be32_to_cpu(reth->length);
2012 		if (psn != e->psn || len != req->total_len)
2013 			goto unlock;
2014 
2015 		release_rdma_sge_mr(e);
2016 
2017 		rkey = be32_to_cpu(reth->rkey);
2018 		vaddr = get_ib_reth_vaddr(reth);
2019 
2020 		qp->r_len = len;
2021 		ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2022 				 IB_ACCESS_REMOTE_READ);
2023 		if (unlikely(!ok))
2024 			goto unlock;
2025 
2026 		/*
2027 		 * If all the response packets for the current request have
2028 		 * been sent out and this request is complete (old_request
2029 		 * == false) and the TID flow may be unusable (the
2030 		 * req->clear_tail is advanced). However, when an earlier
2031 		 * request is received, this request will not be complete any
2032 		 * more (qp->s_tail_ack_queue is moved back, see below).
2033 		 * Consequently, we need to update the TID flow info everytime
2034 		 * a duplicate request is received.
2035 		 */
2036 		bth0 = be32_to_cpu(ohdr->bth[0]);
2037 		if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
2038 					      vaddr, len))
2039 			goto unlock;
2040 
2041 		/*
2042 		 * True if the request is already scheduled (between
2043 		 * qp->s_tail_ack_queue and qp->r_head_ack_queue);
2044 		 */
2045 		if (old_req)
2046 			goto unlock;
2047 	} else {
2048 		struct flow_state *fstate;
2049 		bool schedule = false;
2050 		u8 i;
2051 
2052 		if (req->state == TID_REQUEST_RESEND) {
2053 			req->state = TID_REQUEST_RESEND_ACTIVE;
2054 		} else if (req->state == TID_REQUEST_INIT_RESEND) {
2055 			req->state = TID_REQUEST_INIT;
2056 			schedule = true;
2057 		}
2058 
2059 		/*
2060 		 * True if the request is already scheduled (between
2061 		 * qp->s_tail_ack_queue and qp->r_head_ack_queue).
2062 		 * Also, don't change requests, which are at the SYNC
2063 		 * point and haven't generated any responses yet.
2064 		 * There is nothing to retransmit for them yet.
2065 		 */
2066 		if (old_req || req->state == TID_REQUEST_INIT ||
2067 		    (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
2068 			for (i = prev + 1; ; i++) {
2069 				if (i > rvt_size_atomic(&dev->rdi))
2070 					i = 0;
2071 				if (i == qp->r_head_ack_queue)
2072 					break;
2073 				e = &qp->s_ack_queue[i];
2074 				req = ack_to_tid_req(e);
2075 				if (e->opcode == TID_OP(WRITE_REQ) &&
2076 				    req->state == TID_REQUEST_INIT)
2077 					req->state = TID_REQUEST_INIT_RESEND;
2078 			}
2079 			/*
2080 			 * If the state of the request has been changed,
2081 			 * the first leg needs to get scheduled in order to
2082 			 * pick up the change. Otherwise, normal response
2083 			 * processing should take care of it.
2084 			 */
2085 			if (!schedule)
2086 				goto unlock;
2087 		}
2088 
2089 		/*
2090 		 * If there is no more allocated segment, just schedule the qp
2091 		 * without changing any state.
2092 		 */
2093 		if (req->clear_tail == req->setup_head)
2094 			goto schedule;
2095 		/*
2096 		 * If this request has sent responses for segments, which have
2097 		 * not received data yet (flow_idx != clear_tail), the flow_idx
2098 		 * pointer needs to be adjusted so the same responses can be
2099 		 * re-sent.
2100 		 */
2101 		if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
2102 			fstate = &req->flows[req->clear_tail].flow_state;
2103 			qpriv->pending_tid_w_segs -=
2104 				CIRC_CNT(req->flow_idx, req->clear_tail,
2105 					 MAX_FLOWS);
2106 			req->flow_idx =
2107 				CIRC_ADD(req->clear_tail,
2108 					 delta_psn(psn, fstate->resp_ib_psn),
2109 					 MAX_FLOWS);
2110 			qpriv->pending_tid_w_segs +=
2111 				delta_psn(psn, fstate->resp_ib_psn);
2112 			/*
2113 			 * When flow_idx == setup_head, we've gotten a duplicate
2114 			 * request for a segment, which has not been allocated
2115 			 * yet. In that case, don't adjust this request.
2116 			 * However, we still want to go through the loop below
2117 			 * to adjust all subsequent requests.
2118 			 */
2119 			if (CIRC_CNT(req->setup_head, req->flow_idx,
2120 				     MAX_FLOWS)) {
2121 				req->cur_seg = delta_psn(psn, e->psn);
2122 				req->state = TID_REQUEST_RESEND_ACTIVE;
2123 			}
2124 		}
2125 
2126 		for (i = prev + 1; ; i++) {
2127 			/*
2128 			 * Look at everything up to and including
2129 			 * s_tail_ack_queue
2130 			 */
2131 			if (i > rvt_size_atomic(&dev->rdi))
2132 				i = 0;
2133 			if (i == qp->r_head_ack_queue)
2134 				break;
2135 			e = &qp->s_ack_queue[i];
2136 			req = ack_to_tid_req(e);
2137 			trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
2138 						   e->lpsn, req);
2139 			if (e->opcode != TID_OP(WRITE_REQ) ||
2140 			    req->cur_seg == req->comp_seg ||
2141 			    req->state == TID_REQUEST_INIT ||
2142 			    req->state == TID_REQUEST_INIT_RESEND) {
2143 				if (req->state == TID_REQUEST_INIT)
2144 					req->state = TID_REQUEST_INIT_RESEND;
2145 				continue;
2146 			}
2147 			qpriv->pending_tid_w_segs -=
2148 				CIRC_CNT(req->flow_idx,
2149 					 req->clear_tail,
2150 					 MAX_FLOWS);
2151 			req->flow_idx = req->clear_tail;
2152 			req->state = TID_REQUEST_RESEND;
2153 			req->cur_seg = req->comp_seg;
2154 		}
2155 		qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
2156 	}
2157 	/* Re-process old requests.*/
2158 	if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2159 		qp->s_acked_ack_queue = prev;
2160 	qp->s_tail_ack_queue = prev;
2161 	/*
2162 	 * Since the qp->s_tail_ack_queue is modified, the
2163 	 * qp->s_ack_state must be changed to re-initialize
2164 	 * qp->s_ack_rdma_sge; Otherwise, we will end up in
2165 	 * wrong memory region.
2166 	 */
2167 	qp->s_ack_state = OP(ACKNOWLEDGE);
2168 schedule:
2169 	/*
2170 	 * It's possible to receive a retry psn that is earlier than an RNRNAK
2171 	 * psn. In this case, the rnrnak state should be cleared.
2172 	 */
2173 	if (qpriv->rnr_nak_state) {
2174 		qp->s_nak_state = 0;
2175 		qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
2176 		qp->r_psn = e->lpsn + 1;
2177 		hfi1_tid_write_alloc_resources(qp, true);
2178 	}
2179 
2180 	qp->r_state = e->opcode;
2181 	qp->r_nak_state = 0;
2182 	qp->s_flags |= RVT_S_RESP_PENDING;
2183 	hfi1_schedule_send(qp);
2184 unlock:
2185 	spin_unlock_irqrestore(&qp->s_lock, flags);
2186 done:
2187 	return 1;
2188 }
2189 
2190 void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
2191 {
2192 	/* HANDLER FOR TID RDMA READ REQUEST packet (Responder side)*/
2193 
2194 	/*
2195 	 * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
2196 	 *    (see hfi1_rc_rcv())
2197 	 * 2. Put TID RDMA READ REQ into the response queueu (s_ack_queue)
2198 	 *     - Setup struct tid_rdma_req with request info
2199 	 *     - Initialize struct tid_rdma_flow info;
2200 	 *     - Copy TID entries;
2201 	 * 3. Set the qp->s_ack_state.
2202 	 * 4. Set RVT_S_RESP_PENDING in s_flags.
2203 	 * 5. Kick the send engine (hfi1_schedule_send())
2204 	 */
2205 	struct hfi1_ctxtdata *rcd = packet->rcd;
2206 	struct rvt_qp *qp = packet->qp;
2207 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
2208 	struct ib_other_headers *ohdr = packet->ohdr;
2209 	struct rvt_ack_entry *e;
2210 	unsigned long flags;
2211 	struct ib_reth *reth;
2212 	struct hfi1_qp_priv *qpriv = qp->priv;
2213 	u32 bth0, psn, len, rkey;
2214 	bool fecn;
2215 	u8 next;
2216 	u64 vaddr;
2217 	int diff;
2218 	u8 nack_state = IB_NAK_INVALID_REQUEST;
2219 
2220 	bth0 = be32_to_cpu(ohdr->bth[0]);
2221 	if (hfi1_ruc_check_hdr(ibp, packet))
2222 		return;
2223 
2224 	fecn = process_ecn(qp, packet);
2225 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2226 	trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
2227 
2228 	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2229 		rvt_comm_est(qp);
2230 
2231 	if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2232 		goto nack_inv;
2233 
2234 	reth = &ohdr->u.tid_rdma.r_req.reth;
2235 	vaddr = be64_to_cpu(reth->vaddr);
2236 	len = be32_to_cpu(reth->length);
2237 	/* The length needs to be in multiples of PAGE_SIZE */
2238 	if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
2239 		goto nack_inv;
2240 
2241 	diff = delta_psn(psn, qp->r_psn);
2242 	if (unlikely(diff)) {
2243 		tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
2244 		return;
2245 	}
2246 
2247 	/* We've verified the request, insert it into the ack queue. */
2248 	next = qp->r_head_ack_queue + 1;
2249 	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
2250 		next = 0;
2251 	spin_lock_irqsave(&qp->s_lock, flags);
2252 	if (unlikely(next == qp->s_tail_ack_queue)) {
2253 		if (!qp->s_ack_queue[next].sent) {
2254 			nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2255 			goto nack_inv_unlock;
2256 		}
2257 		update_ack_queue(qp, next);
2258 	}
2259 	e = &qp->s_ack_queue[qp->r_head_ack_queue];
2260 	release_rdma_sge_mr(e);
2261 
2262 	rkey = be32_to_cpu(reth->rkey);
2263 	qp->r_len = len;
2264 
2265 	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
2266 				  rkey, IB_ACCESS_REMOTE_READ)))
2267 		goto nack_acc;
2268 
2269 	/* Accept the request parameters */
2270 	if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
2271 				      len))
2272 		goto nack_inv_unlock;
2273 
2274 	qp->r_state = e->opcode;
2275 	qp->r_nak_state = 0;
2276 	/*
2277 	 * We need to increment the MSN here instead of when we
2278 	 * finish sending the result since a duplicate request would
2279 	 * increment it more than once.
2280 	 */
2281 	qp->r_msn++;
2282 	qp->r_psn += e->lpsn - e->psn + 1;
2283 
2284 	qp->r_head_ack_queue = next;
2285 
2286 	/*
2287 	 * For all requests other than TID WRITE which are added to the ack
2288 	 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2289 	 * do this because of interlocks between these and TID WRITE
2290 	 * requests. The same change has also been made in hfi1_rc_rcv().
2291 	 */
2292 	qpriv->r_tid_alloc = qp->r_head_ack_queue;
2293 
2294 	/* Schedule the send tasklet. */
2295 	qp->s_flags |= RVT_S_RESP_PENDING;
2296 	if (fecn)
2297 		qp->s_flags |= RVT_S_ECN;
2298 	hfi1_schedule_send(qp);
2299 
2300 	spin_unlock_irqrestore(&qp->s_lock, flags);
2301 	return;
2302 
2303 nack_inv_unlock:
2304 	spin_unlock_irqrestore(&qp->s_lock, flags);
2305 nack_inv:
2306 	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2307 	qp->r_nak_state = nack_state;
2308 	qp->r_ack_psn = qp->r_psn;
2309 	/* Queue NAK for later */
2310 	rc_defered_ack(rcd, qp);
2311 	return;
2312 nack_acc:
2313 	spin_unlock_irqrestore(&qp->s_lock, flags);
2314 	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2315 	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2316 	qp->r_ack_psn = qp->r_psn;
2317 }
2318 
2319 u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
2320 				  struct ib_other_headers *ohdr, u32 *bth0,
2321 				  u32 *bth1, u32 *bth2, u32 *len, bool *last)
2322 {
2323 	struct hfi1_ack_priv *epriv = e->priv;
2324 	struct tid_rdma_request *req = &epriv->tid_req;
2325 	struct hfi1_qp_priv *qpriv = qp->priv;
2326 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2327 	u32 tidentry = flow->tid_entry[flow->tid_idx];
2328 	u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
2329 	struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
2330 	u32 next_offset, om = KDETH_OM_LARGE;
2331 	bool last_pkt;
2332 	u32 hdwords = 0;
2333 	struct tid_rdma_params *remote;
2334 
2335 	*len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2336 	flow->sent += *len;
2337 	next_offset = flow->tid_offset + *len;
2338 	last_pkt = (flow->sent >= flow->length);
2339 
2340 	trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2341 	trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2342 
2343 	rcu_read_lock();
2344 	remote = rcu_dereference(qpriv->tid_rdma.remote);
2345 	if (!remote) {
2346 		rcu_read_unlock();
2347 		goto done;
2348 	}
2349 	KDETH_RESET(resp->kdeth0, KVER, 0x1);
2350 	KDETH_SET(resp->kdeth0, SH, !last_pkt);
2351 	KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
2352 	KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
2353 	KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
2354 	KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
2355 	KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
2356 	KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
2357 	resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
2358 	rcu_read_unlock();
2359 
2360 	resp->aeth = rvt_compute_aeth(qp);
2361 	resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
2362 					       flow->pkt));
2363 
2364 	*bth0 = TID_OP(READ_RESP) << 24;
2365 	*bth1 = flow->tid_qpn;
2366 	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
2367 			  HFI1_KDETH_BTH_SEQ_MASK) |
2368 			 (flow->flow_state.generation <<
2369 			  HFI1_KDETH_BTH_SEQ_SHIFT));
2370 	*last = last_pkt;
2371 	if (last_pkt)
2372 		/* Advance to next flow */
2373 		req->clear_tail = (req->clear_tail + 1) &
2374 				  (MAX_FLOWS - 1);
2375 
2376 	if (next_offset >= tidlen) {
2377 		flow->tid_offset = 0;
2378 		flow->tid_idx++;
2379 	} else {
2380 		flow->tid_offset = next_offset;
2381 	}
2382 
2383 	hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);
2384 
2385 done:
2386 	return hdwords;
2387 }
2388 
2389 static inline struct tid_rdma_request *
2390 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
2391 	__must_hold(&qp->s_lock)
2392 {
2393 	struct rvt_swqe *wqe;
2394 	struct tid_rdma_request *req = NULL;
2395 	u32 i, end;
2396 
2397 	end = qp->s_cur + 1;
2398 	if (end == qp->s_size)
2399 		end = 0;
2400 	for (i = qp->s_acked; i != end;) {
2401 		wqe = rvt_get_swqe_ptr(qp, i);
2402 		if (cmp_psn(psn, wqe->psn) >= 0 &&
2403 		    cmp_psn(psn, wqe->lpsn) <= 0) {
2404 			if (wqe->wr.opcode == opcode)
2405 				req = wqe_to_tid_req(wqe);
2406 			break;
2407 		}
2408 		if (++i == qp->s_size)
2409 			i = 0;
2410 	}
2411 
2412 	return req;
2413 }
2414 
2415 void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
2416 {
2417 	/* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */
2418 
2419 	/*
2420 	 * 1. Find matching SWQE
2421 	 * 2. Check that the entire segment has been read.
2422 	 * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
2423 	 * 4. Free the TID flow resources.
2424 	 * 5. Kick the send engine (hfi1_schedule_send())
2425 	 */
2426 	struct ib_other_headers *ohdr = packet->ohdr;
2427 	struct rvt_qp *qp = packet->qp;
2428 	struct hfi1_qp_priv *priv = qp->priv;
2429 	struct hfi1_ctxtdata *rcd = packet->rcd;
2430 	struct tid_rdma_request *req;
2431 	struct tid_rdma_flow *flow;
2432 	u32 opcode, aeth;
2433 	bool fecn;
2434 	unsigned long flags;
2435 	u32 kpsn, ipsn;
2436 
2437 	trace_hfi1_sender_rcv_tid_read_resp(qp);
2438 	fecn = process_ecn(qp, packet);
2439 	kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2440 	aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
2441 	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2442 
2443 	spin_lock_irqsave(&qp->s_lock, flags);
2444 	ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2445 	req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2446 	if (unlikely(!req))
2447 		goto ack_op_err;
2448 
2449 	flow = &req->flows[req->clear_tail];
2450 	/* When header suppression is disabled */
2451 	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
2452 		update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2453 
2454 		if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
2455 			goto ack_done;
2456 		flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2457 		/*
2458 		 * Copy the payload to destination buffer if this packet is
2459 		 * delivered as an eager packet due to RSM rule and FECN.
2460 		 * The RSM rule selects FECN bit in BTH and SH bit in
2461 		 * KDETH header and therefore will not match the last
2462 		 * packet of each segment that has SH bit cleared.
2463 		 */
2464 		if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
2465 			struct rvt_sge_state ss;
2466 			u32 len;
2467 			u32 tlen = packet->tlen;
2468 			u16 hdrsize = packet->hlen;
2469 			u8 pad = packet->pad;
2470 			u8 extra_bytes = pad + packet->extra_byte +
2471 				(SIZE_OF_CRC << 2);
2472 			u32 pmtu = qp->pmtu;
2473 
2474 			if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2475 				goto ack_op_err;
2476 			len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
2477 			if (unlikely(len < pmtu))
2478 				goto ack_op_err;
2479 			rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
2480 				     false);
2481 			/* Raise the sw sequence check flag for next packet */
2482 			priv->s_flags |= HFI1_R_TID_SW_PSN;
2483 		}
2484 
2485 		goto ack_done;
2486 	}
2487 	flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2488 	req->ack_pending--;
2489 	priv->pending_tid_r_segs--;
2490 	qp->s_num_rd_atomic--;
2491 	if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2492 	    !qp->s_num_rd_atomic) {
2493 		qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2494 				 RVT_S_WAIT_ACK);
2495 		hfi1_schedule_send(qp);
2496 	}
2497 	if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2498 		qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
2499 		hfi1_schedule_send(qp);
2500 	}
2501 
2502 	trace_hfi1_ack(qp, ipsn);
2503 	trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2504 					 req->e.swqe->psn, req->e.swqe->lpsn,
2505 					 req);
2506 	trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2507 
2508 	/* Release the tid resources */
2509 	hfi1_kern_exp_rcv_clear(req);
2510 
2511 	if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2512 		goto ack_done;
2513 
2514 	/* If not done yet, build next read request */
2515 	if (++req->comp_seg >= req->total_segs) {
2516 		priv->tid_r_comp++;
2517 		req->state = TID_REQUEST_COMPLETE;
2518 	}
2519 
2520 	/*
2521 	 * Clear the hw flow under two conditions:
2522 	 * 1. This request is a sync point and it is complete;
2523 	 * 2. Current request is completed and there are no more requests.
2524 	 */
2525 	if ((req->state == TID_REQUEST_SYNC &&
2526 	     req->comp_seg == req->cur_seg) ||
2527 	    priv->tid_r_comp == priv->tid_r_reqs) {
2528 		hfi1_kern_clear_hw_flow(priv->rcd, qp);
2529 		priv->s_flags &= ~HFI1_R_TID_SW_PSN;
2530 		if (req->state == TID_REQUEST_SYNC)
2531 			req->state = TID_REQUEST_ACTIVE;
2532 	}
2533 
2534 	hfi1_schedule_send(qp);
2535 	goto ack_done;
2536 
2537 ack_op_err:
2538 	/*
2539 	 * The test indicates that the send engine has finished its cleanup
2540 	 * after sending the request and it's now safe to put the QP into error
2541 	 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
2542 	 * == qp->s_head), it would be unsafe to complete the wqe pointed by
2543 	 * qp->s_acked here. Putting the qp into error state will safely flush
2544 	 * all remaining requests.
2545 	 */
2546 	if (qp->s_last == qp->s_acked)
2547 		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2548 
2549 ack_done:
2550 	spin_unlock_irqrestore(&qp->s_lock, flags);
2551 }
2552 
2553 void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2554 	__must_hold(&qp->s_lock)
2555 {
2556 	u32 n = qp->s_acked;
2557 	struct rvt_swqe *wqe;
2558 	struct tid_rdma_request *req;
2559 	struct hfi1_qp_priv *priv = qp->priv;
2560 
2561 	lockdep_assert_held(&qp->s_lock);
2562 	/* Free any TID entries */
2563 	while (n != qp->s_tail) {
2564 		wqe = rvt_get_swqe_ptr(qp, n);
2565 		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2566 			req = wqe_to_tid_req(wqe);
2567 			hfi1_kern_exp_rcv_clear_all(req);
2568 		}
2569 
2570 		if (++n == qp->s_size)
2571 			n = 0;
2572 	}
2573 	/* Free flow */
2574 	hfi1_kern_clear_hw_flow(priv->rcd, qp);
2575 }
2576 
2577 static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2578 			     struct hfi1_packet *packet, u8 rcv_type,
2579 			     u8 opcode)
2580 {
2581 	struct rvt_qp *qp = packet->qp;
2582 	struct hfi1_qp_priv *qpriv = qp->priv;
2583 	u32 ipsn;
2584 	struct ib_other_headers *ohdr = packet->ohdr;
2585 	struct rvt_ack_entry *e;
2586 	struct tid_rdma_request *req;
2587 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2588 	u32 i;
2589 
2590 	if (rcv_type >= RHF_RCV_TYPE_IB)
2591 		goto done;
2592 
2593 	spin_lock(&qp->s_lock);
2594 
2595 	/*
2596 	 * We've ran out of space in the eager buffer.
2597 	 * Eagerly received KDETH packets which require space in the
2598 	 * Eager buffer (packet that have payload) are TID RDMA WRITE
2599 	 * response packets. In this case, we have to re-transmit the
2600 	 * TID RDMA WRITE request.
2601 	 */
2602 	if (rcv_type == RHF_RCV_TYPE_EAGER) {
2603 		hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2604 		hfi1_schedule_send(qp);
2605 		goto done_unlock;
2606 	}
2607 
2608 	/*
2609 	 * For TID READ response, error out QP after freeing the tid
2610 	 * resources.
2611 	 */
2612 	if (opcode == TID_OP(READ_RESP)) {
2613 		ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2614 		if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2615 		    cmp_psn(ipsn, qp->s_psn) < 0) {
2616 			hfi1_kern_read_tid_flow_free(qp);
2617 			spin_unlock(&qp->s_lock);
2618 			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2619 			goto done;
2620 		}
2621 		goto done_unlock;
2622 	}
2623 
2624 	/*
2625 	 * Error out the qp for TID RDMA WRITE
2626 	 */
2627 	hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2628 	for (i = 0; i < rvt_max_atomic(rdi); i++) {
2629 		e = &qp->s_ack_queue[i];
2630 		if (e->opcode == TID_OP(WRITE_REQ)) {
2631 			req = ack_to_tid_req(e);
2632 			hfi1_kern_exp_rcv_clear_all(req);
2633 		}
2634 	}
2635 	spin_unlock(&qp->s_lock);
2636 	rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2637 	goto done;
2638 
2639 done_unlock:
2640 	spin_unlock(&qp->s_lock);
2641 done:
2642 	return true;
2643 }
2644 
2645 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
2646 				      struct rvt_qp *qp, struct rvt_swqe *wqe)
2647 {
2648 	struct tid_rdma_request *req;
2649 	struct tid_rdma_flow *flow;
2650 
2651 	/* Start from the right segment */
2652 	qp->r_flags |= RVT_R_RDMAR_SEQ;
2653 	req = wqe_to_tid_req(wqe);
2654 	flow = &req->flows[req->clear_tail];
2655 	hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2656 	if (list_empty(&qp->rspwait)) {
2657 		qp->r_flags |= RVT_R_RSP_SEND;
2658 		rvt_get_qp(qp);
2659 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2660 	}
2661 }
2662 
2663 /*
2664  * Handle the KDETH eflags for TID RDMA READ response.
2665  *
2666  * Return true if the last packet for a segment has been received and it is
2667  * time to process the response normally; otherwise, return true.
2668  *
2669  * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
2670  */
2671 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2672 				     struct hfi1_packet *packet, u8 rcv_type,
2673 				     u8 rte, u32 psn, u32 ibpsn)
2674 	__must_hold(&packet->qp->r_lock) __must_hold(RCU)
2675 {
2676 	struct hfi1_pportdata *ppd = rcd->ppd;
2677 	struct hfi1_devdata *dd = ppd->dd;
2678 	struct hfi1_ibport *ibp;
2679 	struct rvt_swqe *wqe;
2680 	struct tid_rdma_request *req;
2681 	struct tid_rdma_flow *flow;
2682 	u32 ack_psn;
2683 	struct rvt_qp *qp = packet->qp;
2684 	struct hfi1_qp_priv *priv = qp->priv;
2685 	bool ret = true;
2686 	int diff = 0;
2687 	u32 fpsn;
2688 
2689 	lockdep_assert_held(&qp->r_lock);
2690 	/* If the psn is out of valid range, drop the packet */
2691 	if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2692 	    cmp_psn(ibpsn, qp->s_psn) > 0)
2693 		return ret;
2694 
2695 	spin_lock(&qp->s_lock);
2696 	/*
2697 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2698 	 * requests and implicitly NAK RDMA read and atomic requests issued
2699 	 * before the NAK'ed request.
2700 	 */
2701 	ack_psn = ibpsn - 1;
2702 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2703 	ibp = to_iport(qp->ibqp.device, qp->port_num);
2704 
2705 	/* Complete WQEs that the PSN finishes. */
2706 	while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
2707 		/*
2708 		 * If this request is a RDMA read or atomic, and the NACK is
2709 		 * for a later operation, this NACK NAKs the RDMA read or
2710 		 * atomic.
2711 		 */
2712 		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2713 		    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2714 		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2715 		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2716 			/* Retry this request. */
2717 			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
2718 				qp->r_flags |= RVT_R_RDMAR_SEQ;
2719 				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2720 					restart_tid_rdma_read_req(rcd, qp,
2721 								  wqe);
2722 				} else {
2723 					hfi1_restart_rc(qp, qp->s_last_psn + 1,
2724 							0);
2725 					if (list_empty(&qp->rspwait)) {
2726 						qp->r_flags |= RVT_R_RSP_SEND;
2727 						rvt_get_qp(qp);
2728 						list_add_tail(/* wait */
2729 						   &qp->rspwait,
2730 						   &rcd->qp_wait_list);
2731 					}
2732 				}
2733 			}
2734 			/*
2735 			 * No need to process the NAK since we are
2736 			 * restarting an earlier request.
2737 			 */
2738 			break;
2739 		}
2740 
2741 		wqe = do_rc_completion(qp, wqe, ibp);
2742 		if (qp->s_acked == qp->s_tail)
2743 			break;
2744 	}
2745 
2746 	/* Handle the eflags for the request */
2747 	if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2748 		goto s_unlock;
2749 
2750 	req = wqe_to_tid_req(wqe);
2751 	switch (rcv_type) {
2752 	case RHF_RCV_TYPE_EXPECTED:
2753 		switch (rte) {
2754 		case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2755 			/*
2756 			 * On the first occurrence of a Flow Sequence error,
2757 			 * the flag TID_FLOW_SW_PSN is set.
2758 			 *
2759 			 * After that, the flow is *not* reprogrammed and the
2760 			 * protocol falls back to SW PSN checking. This is done
2761 			 * to prevent continuous Flow Sequence errors for any
2762 			 * packets that could be still in the fabric.
2763 			 */
2764 			flow = &req->flows[req->clear_tail];
2765 			if (priv->s_flags & HFI1_R_TID_SW_PSN) {
2766 				diff = cmp_psn(psn,
2767 					       flow->flow_state.r_next_psn);
2768 				if (diff > 0) {
2769 					if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2770 						restart_tid_rdma_read_req(rcd,
2771 									  qp,
2772 									  wqe);
2773 
2774 					/* Drop the packet.*/
2775 					goto s_unlock;
2776 				} else if (diff < 0) {
2777 					/*
2778 					 * If a response packet for a restarted
2779 					 * request has come back, reset the
2780 					 * restart flag.
2781 					 */
2782 					if (qp->r_flags & RVT_R_RDMAR_SEQ)
2783 						qp->r_flags &=
2784 							~RVT_R_RDMAR_SEQ;
2785 
2786 					/* Drop the packet.*/
2787 					goto s_unlock;
2788 				}
2789 
2790 				/*
2791 				 * If SW PSN verification is successful and
2792 				 * this is the last packet in the segment, tell
2793 				 * the caller to process it as a normal packet.
2794 				 */
2795 				fpsn = full_flow_psn(flow,
2796 						     flow->flow_state.lpsn);
2797 				if (cmp_psn(fpsn, psn) == 0) {
2798 					ret = false;
2799 					if (qp->r_flags & RVT_R_RDMAR_SEQ)
2800 						qp->r_flags &=
2801 							~RVT_R_RDMAR_SEQ;
2802 				}
2803 				flow->flow_state.r_next_psn =
2804 					mask_psn(psn + 1);
2805 			} else {
2806 				u32 last_psn;
2807 
2808 				last_psn = read_r_next_psn(dd, rcd->ctxt,
2809 							   flow->idx);
2810 				flow->flow_state.r_next_psn = last_psn;
2811 				priv->s_flags |= HFI1_R_TID_SW_PSN;
2812 				/*
2813 				 * If no request has been restarted yet,
2814 				 * restart the current one.
2815 				 */
2816 				if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2817 					restart_tid_rdma_read_req(rcd, qp,
2818 								  wqe);
2819 			}
2820 
2821 			break;
2822 
2823 		case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
2824 			/*
2825 			 * Since the TID flow is able to ride through
2826 			 * generation mismatch, drop this stale packet.
2827 			 */
2828 			break;
2829 
2830 		default:
2831 			break;
2832 		}
2833 		break;
2834 
2835 	case RHF_RCV_TYPE_ERROR:
2836 		switch (rte) {
2837 		case RHF_RTE_ERROR_OP_CODE_ERR:
2838 		case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
2839 		case RHF_RTE_ERROR_KHDR_HCRC_ERR:
2840 		case RHF_RTE_ERROR_KHDR_KVER_ERR:
2841 		case RHF_RTE_ERROR_CONTEXT_ERR:
2842 		case RHF_RTE_ERROR_KHDR_TID_ERR:
2843 		default:
2844 			break;
2845 		}
2846 	default:
2847 		break;
2848 	}
2849 s_unlock:
2850 	spin_unlock(&qp->s_lock);
2851 	return ret;
2852 }
2853 
2854 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2855 			      struct hfi1_pportdata *ppd,
2856 			      struct hfi1_packet *packet)
2857 {
2858 	struct hfi1_ibport *ibp = &ppd->ibport_data;
2859 	struct hfi1_devdata *dd = ppd->dd;
2860 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
2861 	u8 rcv_type = rhf_rcv_type(packet->rhf);
2862 	u8 rte = rhf_rcv_type_err(packet->rhf);
2863 	struct ib_header *hdr = packet->hdr;
2864 	struct ib_other_headers *ohdr = NULL;
2865 	int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
2866 	u16 lid  = be16_to_cpu(hdr->lrh[1]);
2867 	u8 opcode;
2868 	u32 qp_num, psn, ibpsn;
2869 	struct rvt_qp *qp;
2870 	struct hfi1_qp_priv *qpriv;
2871 	unsigned long flags;
2872 	bool ret = true;
2873 	struct rvt_ack_entry *e;
2874 	struct tid_rdma_request *req;
2875 	struct tid_rdma_flow *flow;
2876 	int diff = 0;
2877 
2878 	trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
2879 					   packet->rhf);
2880 	if (packet->rhf & RHF_ICRC_ERR)
2881 		return ret;
2882 
2883 	packet->ohdr = &hdr->u.oth;
2884 	ohdr = packet->ohdr;
2885 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
2886 
2887 	/* Get the destination QP number. */
2888 	qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
2889 		RVT_QPN_MASK;
2890 	if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
2891 		goto drop;
2892 
2893 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2894 	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2895 
2896 	rcu_read_lock();
2897 	qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
2898 	if (!qp)
2899 		goto rcu_unlock;
2900 
2901 	packet->qp = qp;
2902 
2903 	/* Check for valid receive state. */
2904 	spin_lock_irqsave(&qp->r_lock, flags);
2905 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2906 		ibp->rvp.n_pkt_drops++;
2907 		goto r_unlock;
2908 	}
2909 
2910 	if (packet->rhf & RHF_TID_ERR) {
2911 		/* For TIDERR and RC QPs preemptively schedule a NAK */
2912 		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
2913 
2914 		/* Sanity check packet */
2915 		if (tlen < 24)
2916 			goto r_unlock;
2917 
2918 		/*
2919 		 * Check for GRH. We should never get packets with GRH in this
2920 		 * path.
2921 		 */
2922 		if (lnh == HFI1_LRH_GRH)
2923 			goto r_unlock;
2924 
2925 		if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
2926 			goto r_unlock;
2927 	}
2928 
2929 	/* handle TID RDMA READ */
2930 	if (opcode == TID_OP(READ_RESP)) {
2931 		ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
2932 		ibpsn = mask_psn(ibpsn);
2933 		ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2934 					       ibpsn);
2935 		goto r_unlock;
2936 	}
2937 
2938 	/*
2939 	 * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
2940 	 * processed. These a completed sequentially so we can be sure that
2941 	 * the pointer will not change until the entire request has completed.
2942 	 */
2943 	spin_lock(&qp->s_lock);
2944 	qpriv = qp->priv;
2945 	e = &qp->s_ack_queue[qpriv->r_tid_tail];
2946 	req = ack_to_tid_req(e);
2947 	flow = &req->flows[req->clear_tail];
2948 	trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2949 	trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
2950 	trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
2951 	trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
2952 					       e->lpsn, req);
2953 	trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
2954 
2955 	switch (rcv_type) {
2956 	case RHF_RCV_TYPE_EXPECTED:
2957 		switch (rte) {
2958 		case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2959 			if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
2960 				qpriv->s_flags |= HFI1_R_TID_SW_PSN;
2961 				flow->flow_state.r_next_psn =
2962 					read_r_next_psn(dd, rcd->ctxt,
2963 							flow->idx);
2964 				qpriv->r_next_psn_kdeth =
2965 					flow->flow_state.r_next_psn;
2966 				goto nak_psn;
2967 			} else {
2968 				/*
2969 				 * If the received PSN does not match the next
2970 				 * expected PSN, NAK the packet.
2971 				 * However, only do that if we know that the a
2972 				 * NAK has already been sent. Otherwise, this
2973 				 * mismatch could be due to packets that were
2974 				 * already in flight.
2975 				 */
2976 				diff = cmp_psn(psn,
2977 					       flow->flow_state.r_next_psn);
2978 				if (diff > 0)
2979 					goto nak_psn;
2980 				else if (diff < 0)
2981 					break;
2982 
2983 				qpriv->s_nak_state = 0;
2984 				/*
2985 				 * If SW PSN verification is successful and this
2986 				 * is the last packet in the segment, tell the
2987 				 * caller to process it as a normal packet.
2988 				 */
2989 				if (psn == full_flow_psn(flow,
2990 							 flow->flow_state.lpsn))
2991 					ret = false;
2992 				flow->flow_state.r_next_psn =
2993 					mask_psn(psn + 1);
2994 				qpriv->r_next_psn_kdeth =
2995 					flow->flow_state.r_next_psn;
2996 			}
2997 			break;
2998 
2999 		case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
3000 			goto nak_psn;
3001 
3002 		default:
3003 			break;
3004 		}
3005 		break;
3006 
3007 	case RHF_RCV_TYPE_ERROR:
3008 		switch (rte) {
3009 		case RHF_RTE_ERROR_OP_CODE_ERR:
3010 		case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
3011 		case RHF_RTE_ERROR_KHDR_HCRC_ERR:
3012 		case RHF_RTE_ERROR_KHDR_KVER_ERR:
3013 		case RHF_RTE_ERROR_CONTEXT_ERR:
3014 		case RHF_RTE_ERROR_KHDR_TID_ERR:
3015 		default:
3016 			break;
3017 		}
3018 	default:
3019 		break;
3020 	}
3021 
3022 unlock:
3023 	spin_unlock(&qp->s_lock);
3024 r_unlock:
3025 	spin_unlock_irqrestore(&qp->r_lock, flags);
3026 rcu_unlock:
3027 	rcu_read_unlock();
3028 drop:
3029 	return ret;
3030 nak_psn:
3031 	ibp->rvp.n_rc_seqnak++;
3032 	if (!qpriv->s_nak_state) {
3033 		qpriv->s_nak_state = IB_NAK_PSN_ERROR;
3034 		/* We are NAK'ing the next expected PSN */
3035 		qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
3036 		qpriv->s_flags |= RVT_S_ACK_PENDING;
3037 		if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
3038 			qpriv->r_tid_ack = qpriv->r_tid_tail;
3039 		hfi1_schedule_tid_send(qp);
3040 	}
3041 	goto unlock;
3042 }
3043 
3044 /*
3045  * "Rewind" the TID request information.
3046  * This means that we reset the state back to ACTIVE,
3047  * find the proper flow, set the flow index to that flow,
3048  * and reset the flow information.
3049  */
3050 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3051 			       u32 *bth2)
3052 {
3053 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3054 	struct tid_rdma_flow *flow;
3055 	struct hfi1_qp_priv *qpriv = qp->priv;
3056 	int diff, delta_pkts;
3057 	u32 tididx = 0, i;
3058 	u16 fidx;
3059 
3060 	if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3061 		*bth2 = mask_psn(qp->s_psn);
3062 		flow = find_flow_ib(req, *bth2, &fidx);
3063 		if (!flow) {
3064 			trace_hfi1_msg_tid_restart_req(/* msg */
3065 			   qp, "!!!!!! Could not find flow to restart: bth2 ",
3066 			   (u64)*bth2);
3067 			trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
3068 						       wqe->psn, wqe->lpsn,
3069 						       req);
3070 			return;
3071 		}
3072 	} else {
3073 		fidx = req->acked_tail;
3074 		flow = &req->flows[fidx];
3075 		*bth2 = mask_psn(req->r_ack_psn);
3076 	}
3077 
3078 	if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3079 		delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
3080 	else
3081 		delta_pkts = delta_psn(*bth2,
3082 				       full_flow_psn(flow,
3083 						     flow->flow_state.spsn));
3084 
3085 	trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3086 	diff = delta_pkts + flow->resync_npkts;
3087 
3088 	flow->sent = 0;
3089 	flow->pkt = 0;
3090 	flow->tid_idx = 0;
3091 	flow->tid_offset = 0;
3092 	if (diff) {
3093 		for (tididx = 0; tididx < flow->tidcnt; tididx++) {
3094 			u32 tidentry = flow->tid_entry[tididx], tidlen,
3095 				tidnpkts, npkts;
3096 
3097 			flow->tid_offset = 0;
3098 			tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
3099 			tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
3100 			npkts = min_t(u32, diff, tidnpkts);
3101 			flow->pkt += npkts;
3102 			flow->sent += (npkts == tidnpkts ? tidlen :
3103 				       npkts * qp->pmtu);
3104 			flow->tid_offset += npkts * qp->pmtu;
3105 			diff -= npkts;
3106 			if (!diff)
3107 				break;
3108 		}
3109 	}
3110 	if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3111 		rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3112 			     flow->sent, 0);
3113 		/*
3114 		 * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3115 		 * during a RESYNC, the generation is incremented and the
3116 		 * sequence is reset to 0. Since we've adjusted the npkts in the
3117 		 * flow and the SGE has been sufficiently advanced, we have to
3118 		 * adjust flow->pkt in order to calculate the correct PSN.
3119 		 */
3120 		flow->pkt -= flow->resync_npkts;
3121 	}
3122 
3123 	if (flow->tid_offset ==
3124 	    EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
3125 		tididx++;
3126 		flow->tid_offset = 0;
3127 	}
3128 	flow->tid_idx = tididx;
3129 	if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3130 		/* Move flow_idx to correct index */
3131 		req->flow_idx = fidx;
3132 	else
3133 		req->clear_tail = fidx;
3134 
3135 	trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3136 	trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3137 				       wqe->lpsn, req);
3138 	req->state = TID_REQUEST_ACTIVE;
3139 	if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3140 		/* Reset all the flows that we are going to resend */
3141 		fidx = CIRC_NEXT(fidx, MAX_FLOWS);
3142 		i = qpriv->s_tid_tail;
3143 		do {
3144 			for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
3145 			      fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
3146 				req->flows[fidx].sent = 0;
3147 				req->flows[fidx].pkt = 0;
3148 				req->flows[fidx].tid_idx = 0;
3149 				req->flows[fidx].tid_offset = 0;
3150 				req->flows[fidx].resync_npkts = 0;
3151 			}
3152 			if (i == qpriv->s_tid_cur)
3153 				break;
3154 			do {
3155 				i = (++i == qp->s_size ? 0 : i);
3156 				wqe = rvt_get_swqe_ptr(qp, i);
3157 			} while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
3158 			req = wqe_to_tid_req(wqe);
3159 			req->cur_seg = req->ack_seg;
3160 			fidx = req->acked_tail;
3161 			/* Pull req->clear_tail back */
3162 			req->clear_tail = fidx;
3163 		} while (1);
3164 	}
3165 }
3166 
3167 void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
3168 {
3169 	int i, ret;
3170 	struct hfi1_qp_priv *qpriv = qp->priv;
3171 	struct tid_flow_state *fs;
3172 
3173 	if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
3174 		return;
3175 
3176 	/*
3177 	 * First, clear the flow to help prevent any delayed packets from
3178 	 * being delivered.
3179 	 */
3180 	fs = &qpriv->flow_state;
3181 	if (fs->index != RXE_NUM_TID_FLOWS)
3182 		hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3183 
3184 	for (i = qp->s_acked; i != qp->s_head;) {
3185 		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
3186 
3187 		if (++i == qp->s_size)
3188 			i = 0;
3189 		/* Free only locally allocated TID entries */
3190 		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
3191 			continue;
3192 		do {
3193 			struct hfi1_swqe_priv *priv = wqe->priv;
3194 
3195 			ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
3196 		} while (!ret);
3197 	}
3198 	for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
3199 		struct rvt_ack_entry *e = &qp->s_ack_queue[i];
3200 
3201 		if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
3202 			i = 0;
3203 		/* Free only locally allocated TID entries */
3204 		if (e->opcode != TID_OP(WRITE_REQ))
3205 			continue;
3206 		do {
3207 			struct hfi1_ack_priv *priv = e->priv;
3208 
3209 			ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
3210 		} while (!ret);
3211 	}
3212 }
3213 
3214 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
3215 {
3216 	struct rvt_swqe *prev;
3217 	struct hfi1_qp_priv *priv = qp->priv;
3218 	u32 s_prev;
3219 	struct tid_rdma_request *req;
3220 
3221 	s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
3222 	prev = rvt_get_swqe_ptr(qp, s_prev);
3223 
3224 	switch (wqe->wr.opcode) {
3225 	case IB_WR_SEND:
3226 	case IB_WR_SEND_WITH_IMM:
3227 	case IB_WR_SEND_WITH_INV:
3228 	case IB_WR_ATOMIC_CMP_AND_SWP:
3229 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3230 	case IB_WR_RDMA_WRITE:
3231 		switch (prev->wr.opcode) {
3232 		case IB_WR_TID_RDMA_WRITE:
3233 			req = wqe_to_tid_req(prev);
3234 			if (req->ack_seg != req->total_segs)
3235 				goto interlock;
3236 		default:
3237 			break;
3238 		}
3239 		break;
3240 	case IB_WR_RDMA_READ:
3241 		if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
3242 			break;
3243 		/* fall through */
3244 	case IB_WR_TID_RDMA_READ:
3245 		switch (prev->wr.opcode) {
3246 		case IB_WR_RDMA_READ:
3247 			if (qp->s_acked != qp->s_cur)
3248 				goto interlock;
3249 			break;
3250 		case IB_WR_TID_RDMA_WRITE:
3251 			req = wqe_to_tid_req(prev);
3252 			if (req->ack_seg != req->total_segs)
3253 				goto interlock;
3254 		default:
3255 			break;
3256 		}
3257 	default:
3258 		break;
3259 	}
3260 	return false;
3261 
3262 interlock:
3263 	priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
3264 	return true;
3265 }
3266 
3267 /* Does @sge meet the alignment requirements for tid rdma? */
3268 static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
3269 					struct rvt_sge *sge, int num_sge)
3270 {
3271 	int i;
3272 
3273 	for (i = 0; i < num_sge; i++, sge++) {
3274 		trace_hfi1_sge_check_align(qp, i, sge);
3275 		if ((u64)sge->vaddr & ~PAGE_MASK ||
3276 		    sge->sge_length & ~PAGE_MASK)
3277 			return false;
3278 	}
3279 	return true;
3280 }
3281 
3282 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
3283 {
3284 	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
3285 	struct hfi1_swqe_priv *priv = wqe->priv;
3286 	struct tid_rdma_params *remote;
3287 	enum ib_wr_opcode new_opcode;
3288 	bool do_tid_rdma = false;
3289 	struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3290 
3291 	if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
3292 				ppd->lid)
3293 		return;
3294 	if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
3295 		return;
3296 
3297 	rcu_read_lock();
3298 	remote = rcu_dereference(qpriv->tid_rdma.remote);
3299 	/*
3300 	 * If TID RDMA is disabled by the negotiation, don't
3301 	 * use it.
3302 	 */
3303 	if (!remote)
3304 		goto exit;
3305 
3306 	if (wqe->wr.opcode == IB_WR_RDMA_READ) {
3307 		if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
3308 					 wqe->wr.num_sge)) {
3309 			new_opcode = IB_WR_TID_RDMA_READ;
3310 			do_tid_rdma = true;
3311 		}
3312 	} else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
3313 		/*
3314 		 * TID RDMA is enabled for this RDMA WRITE request iff:
3315 		 *   1. The remote address is page-aligned,
3316 		 *   2. The length is larger than the minimum segment size,
3317 		 *   3. The length is page-multiple.
3318 		 */
3319 		if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
3320 		    !(wqe->length & ~PAGE_MASK)) {
3321 			new_opcode = IB_WR_TID_RDMA_WRITE;
3322 			do_tid_rdma = true;
3323 		}
3324 	}
3325 
3326 	if (do_tid_rdma) {
3327 		if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
3328 			goto exit;
3329 		wqe->wr.opcode = new_opcode;
3330 		priv->tid_req.seg_len =
3331 			min_t(u32, remote->max_len, wqe->length);
3332 		priv->tid_req.total_segs =
3333 			DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
3334 		/* Compute the last PSN of the request */
3335 		wqe->lpsn = wqe->psn;
3336 		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3337 			priv->tid_req.n_flows = remote->max_read;
3338 			qpriv->tid_r_reqs++;
3339 			wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
3340 		} else {
3341 			wqe->lpsn += priv->tid_req.total_segs - 1;
3342 			atomic_inc(&qpriv->n_requests);
3343 		}
3344 
3345 		priv->tid_req.cur_seg = 0;
3346 		priv->tid_req.comp_seg = 0;
3347 		priv->tid_req.ack_seg = 0;
3348 		priv->tid_req.state = TID_REQUEST_INACTIVE;
3349 		/*
3350 		 * Reset acked_tail.
3351 		 * TID RDMA READ does not have ACKs so it does not
3352 		 * update the pointer. We have to reset it so TID RDMA
3353 		 * WRITE does not get confused.
3354 		 */
3355 		priv->tid_req.acked_tail = priv->tid_req.setup_head;
3356 		trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
3357 						 wqe->psn, wqe->lpsn,
3358 						 &priv->tid_req);
3359 	}
3360 exit:
3361 	rcu_read_unlock();
3362 }
3363 
3364 /* TID RDMA WRITE functions */
3365 
3366 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3367 				  struct ib_other_headers *ohdr,
3368 				  u32 *bth1, u32 *bth2, u32 *len)
3369 {
3370 	struct hfi1_qp_priv *qpriv = qp->priv;
3371 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3372 	struct tid_rdma_params *remote;
3373 
3374 	rcu_read_lock();
3375 	remote = rcu_dereference(qpriv->tid_rdma.remote);
3376 	/*
3377 	 * Set the number of flow to be used based on negotiated
3378 	 * parameters.
3379 	 */
3380 	req->n_flows = remote->max_write;
3381 	req->state = TID_REQUEST_ACTIVE;
3382 
3383 	KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
3384 	KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
3385 	ohdr->u.tid_rdma.w_req.reth.vaddr =
3386 		cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
3387 	ohdr->u.tid_rdma.w_req.reth.rkey =
3388 		cpu_to_be32(wqe->rdma_wr.rkey);
3389 	ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
3390 	ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
3391 	*bth1 &= ~RVT_QPN_MASK;
3392 	*bth1 |= remote->qp;
3393 	qp->s_state = TID_OP(WRITE_REQ);
3394 	qp->s_flags |= HFI1_S_WAIT_TID_RESP;
3395 	*bth2 |= IB_BTH_REQ_ACK;
3396 	*len = 0;
3397 
3398 	rcu_read_unlock();
3399 	return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
3400 }
3401 
3402 void hfi1_compute_tid_rdma_flow_wt(void)
3403 {
3404 	/*
3405 	 * Heuristic for computing the RNR timeout when waiting on the flow
3406 	 * queue. Rather than a computationaly expensive exact estimate of when
3407 	 * a flow will be available, we assume that if a QP is at position N in
3408 	 * the flow queue it has to wait approximately (N + 1) * (number of
3409 	 * segments between two sync points), assuming PMTU of 4K. The rationale
3410 	 * for this is that flows are released and recycled at each sync point.
3411 	 */
3412 	tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
3413 		TID_RDMA_MAX_SEGMENT_SIZE;
3414 }
3415 
3416 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
3417 			     struct tid_queue *queue)
3418 {
3419 	return qpriv->tid_enqueue - queue->dequeue;
3420 }
3421 
3422 /*
3423  * @qp: points to rvt_qp context.
3424  * @to_seg: desired RNR timeout in segments.
3425  * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
3426  */
3427 static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
3428 {
3429 	struct hfi1_qp_priv *qpriv = qp->priv;
3430 	u64 timeout;
3431 	u32 bytes_per_us;
3432 	u8 i;
3433 
3434 	bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
3435 	timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
3436 	/*
3437 	 * Find the next highest value in the RNR table to the required
3438 	 * timeout. This gives the responder some padding.
3439 	 */
3440 	for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
3441 		if (rvt_rnr_tbl_to_usec(i) >= timeout)
3442 			return i;
3443 	return 0;
3444 }
3445 
3446 /**
3447  * Central place for resource allocation at TID write responder,
3448  * is called from write_req and write_data interrupt handlers as
3449  * well as the send thread when a queued QP is scheduled for
3450  * resource allocation.
3451  *
3452  * Iterates over (a) segments of a request and then (b) queued requests
3453  * themselves to allocate resources for up to local->max_write
3454  * segments across multiple requests. Stop allocating when we
3455  * hit a sync point, resume allocating after data packets at
3456  * sync point have been received.
3457  *
3458  * Resource allocation and sending of responses is decoupled. The
3459  * request/segment which are being allocated and sent are as follows.
3460  * Resources are allocated for:
3461  *     [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3462  * The send thread sends:
3463  *     [request: qp->s_tail_ack_queue, segment:req->cur_seg]
3464  */
3465 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
3466 {
3467 	struct tid_rdma_request *req;
3468 	struct hfi1_qp_priv *qpriv = qp->priv;
3469 	struct hfi1_ctxtdata *rcd = qpriv->rcd;
3470 	struct tid_rdma_params *local = &qpriv->tid_rdma.local;
3471 	struct rvt_ack_entry *e;
3472 	u32 npkts, to_seg;
3473 	bool last;
3474 	int ret = 0;
3475 
3476 	lockdep_assert_held(&qp->s_lock);
3477 
3478 	while (1) {
3479 		trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
3480 		trace_hfi1_tid_write_rsp_alloc_res(qp);
3481 		/*
3482 		 * Don't allocate more segments if a RNR NAK has already been
3483 		 * scheduled to avoid messing up qp->r_psn: the RNR NAK will
3484 		 * be sent only when all allocated segments have been sent.
3485 		 * However, if more segments are allocated before that, TID RDMA
3486 		 * WRITE RESP packets will be sent out for these new segments
3487 		 * before the RNR NAK packet. When the requester receives the
3488 		 * RNR NAK packet, it will restart with qp->s_last_psn + 1,
3489 		 * which does not match qp->r_psn and will be dropped.
3490 		 * Consequently, the requester will exhaust its retries and
3491 		 * put the qp into error state.
3492 		 */
3493 		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
3494 			break;
3495 
3496 		/* No requests left to process */
3497 		if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
3498 			/* If all data has been received, clear the flow */
3499 			if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
3500 			    !qpriv->alloc_w_segs) {
3501 				hfi1_kern_clear_hw_flow(rcd, qp);
3502 				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3503 			}
3504 			break;
3505 		}
3506 
3507 		e = &qp->s_ack_queue[qpriv->r_tid_alloc];
3508 		if (e->opcode != TID_OP(WRITE_REQ))
3509 			goto next_req;
3510 		req = ack_to_tid_req(e);
3511 		trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
3512 						   e->lpsn, req);
3513 		/* Finished allocating for all segments of this request */
3514 		if (req->alloc_seg >= req->total_segs)
3515 			goto next_req;
3516 
3517 		/* Can allocate only a maximum of local->max_write for a QP */
3518 		if (qpriv->alloc_w_segs >= local->max_write)
3519 			break;
3520 
3521 		/* Don't allocate at a sync point with data packets pending */
3522 		if (qpriv->sync_pt && qpriv->alloc_w_segs)
3523 			break;
3524 
3525 		/* All data received at the sync point, continue */
3526 		if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
3527 			hfi1_kern_clear_hw_flow(rcd, qp);
3528 			qpriv->sync_pt = false;
3529 			qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3530 		}
3531 
3532 		/* Allocate flow if we don't have one */
3533 		if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
3534 			ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3535 			if (ret) {
3536 				to_seg = tid_rdma_flow_wt *
3537 					position_in_queue(qpriv,
3538 							  &rcd->flow_queue);
3539 				break;
3540 			}
3541 		}
3542 
3543 		npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3544 
3545 		/*
3546 		 * We are at a sync point if we run out of KDETH PSN space.
3547 		 * Last PSN of every generation is reserved for RESYNC.
3548 		 */
3549 		if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
3550 			qpriv->sync_pt = true;
3551 			break;
3552 		}
3553 
3554 		/*
3555 		 * If overtaking req->acked_tail, send an RNR NAK. Because the
3556 		 * QP is not queued in this case, and the issue can only be
3557 		 * caused due a delay in scheduling the second leg which we
3558 		 * cannot estimate, we use a rather arbitrary RNR timeout of
3559 		 * (MAX_FLOWS / 2) segments
3560 		 */
3561 		if (!CIRC_SPACE(req->setup_head, req->acked_tail,
3562 				MAX_FLOWS)) {
3563 			ret = -EAGAIN;
3564 			to_seg = MAX_FLOWS >> 1;
3565 			qpriv->s_flags |= RVT_S_ACK_PENDING;
3566 			hfi1_schedule_tid_send(qp);
3567 			break;
3568 		}
3569 
3570 		/* Try to allocate rcv array / TID entries */
3571 		ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
3572 		if (ret == -EAGAIN)
3573 			to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3574 		if (ret)
3575 			break;
3576 
3577 		qpriv->alloc_w_segs++;
3578 		req->alloc_seg++;
3579 		continue;
3580 next_req:
3581 		/* Begin processing the next request */
3582 		if (++qpriv->r_tid_alloc >
3583 		    rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3584 			qpriv->r_tid_alloc = 0;
3585 	}
3586 
3587 	/*
3588 	 * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
3589 	 * has failed (b) we are called from the rcv handler interrupt context
3590 	 * (c) an RNR NAK has not already been scheduled
3591 	 */
3592 	if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
3593 		goto send_rnr_nak;
3594 
3595 	return;
3596 
3597 send_rnr_nak:
3598 	lockdep_assert_held(&qp->r_lock);
3599 
3600 	/* Set r_nak_state to prevent unrelated events from generating NAK's */
3601 	qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
3602 
3603 	/* Pull back r_psn to the segment being RNR NAK'd */
3604 	qp->r_psn = e->psn + req->alloc_seg;
3605 	qp->r_ack_psn = qp->r_psn;
3606 	/*
3607 	 * Pull back r_head_ack_queue to the ack entry following the request
3608 	 * being RNR NAK'd. This allows resources to be allocated to the request
3609 	 * if the queued QP is scheduled.
3610 	 */
3611 	qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
3612 	if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3613 		qp->r_head_ack_queue = 0;
3614 	qpriv->r_tid_head = qp->r_head_ack_queue;
3615 	/*
3616 	 * These send side fields are used in make_rc_ack(). They are set in
3617 	 * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
3618 	 * for consistency
3619 	 */
3620 	qp->s_nak_state = qp->r_nak_state;
3621 	qp->s_ack_psn = qp->r_ack_psn;
3622 	/*
3623 	 * Clear the ACK PENDING flag to prevent unwanted ACK because we
3624 	 * have modified qp->s_ack_psn here.
3625 	 */
3626 	qp->s_flags &= ~(RVT_S_ACK_PENDING);
3627 
3628 	trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
3629 	/*
3630 	 * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
3631 	 * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be
3632 	 * used for this because qp->s_lock is dropped before calling
3633 	 * hfi1_send_rc_ack() leading to inconsistency between the receive
3634 	 * interrupt handlers and the send thread in make_rc_ack()
3635 	 */
3636 	qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
3637 
3638 	/*
3639 	 * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive
3640 	 * interrupt handlers but will be sent from the send engine behind any
3641 	 * previous responses that may have been scheduled
3642 	 */
3643 	rc_defered_ack(rcd, qp);
3644 }
3645 
3646 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
3647 {
3648 	/* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side)*/
3649 
3650 	/*
3651 	 * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
3652 	 *    (see hfi1_rc_rcv())
3653 	 *     - Don't allow 0-length requests.
3654 	 * 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue)
3655 	 *     - Setup struct tid_rdma_req with request info
3656 	 *     - Prepare struct tid_rdma_flow array?
3657 	 * 3. Set the qp->s_ack_state as state diagram in design doc.
3658 	 * 4. Set RVT_S_RESP_PENDING in s_flags.
3659 	 * 5. Kick the send engine (hfi1_schedule_send())
3660 	 */
3661 	struct hfi1_ctxtdata *rcd = packet->rcd;
3662 	struct rvt_qp *qp = packet->qp;
3663 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
3664 	struct ib_other_headers *ohdr = packet->ohdr;
3665 	struct rvt_ack_entry *e;
3666 	unsigned long flags;
3667 	struct ib_reth *reth;
3668 	struct hfi1_qp_priv *qpriv = qp->priv;
3669 	struct tid_rdma_request *req;
3670 	u32 bth0, psn, len, rkey, num_segs;
3671 	bool fecn;
3672 	u8 next;
3673 	u64 vaddr;
3674 	int diff;
3675 
3676 	bth0 = be32_to_cpu(ohdr->bth[0]);
3677 	if (hfi1_ruc_check_hdr(ibp, packet))
3678 		return;
3679 
3680 	fecn = process_ecn(qp, packet);
3681 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
3682 	trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
3683 
3684 	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
3685 		rvt_comm_est(qp);
3686 
3687 	if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3688 		goto nack_inv;
3689 
3690 	reth = &ohdr->u.tid_rdma.w_req.reth;
3691 	vaddr = be64_to_cpu(reth->vaddr);
3692 	len = be32_to_cpu(reth->length);
3693 
3694 	num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
3695 	diff = delta_psn(psn, qp->r_psn);
3696 	if (unlikely(diff)) {
3697 		tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
3698 		return;
3699 	}
3700 
3701 	/*
3702 	 * The resent request which was previously RNR NAK'd is inserted at the
3703 	 * location of the original request, which is one entry behind
3704 	 * r_head_ack_queue
3705 	 */
3706 	if (qpriv->rnr_nak_state)
3707 		qp->r_head_ack_queue = qp->r_head_ack_queue ?
3708 			qp->r_head_ack_queue - 1 :
3709 			rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
3710 
3711 	/* We've verified the request, insert it into the ack queue. */
3712 	next = qp->r_head_ack_queue + 1;
3713 	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3714 		next = 0;
3715 	spin_lock_irqsave(&qp->s_lock, flags);
3716 	if (unlikely(next == qp->s_acked_ack_queue)) {
3717 		if (!qp->s_ack_queue[next].sent)
3718 			goto nack_inv_unlock;
3719 		update_ack_queue(qp, next);
3720 	}
3721 	e = &qp->s_ack_queue[qp->r_head_ack_queue];
3722 	req = ack_to_tid_req(e);
3723 
3724 	/* Bring previously RNR NAK'd request back to life */
3725 	if (qpriv->rnr_nak_state) {
3726 		qp->r_nak_state = 0;
3727 		qp->s_nak_state = 0;
3728 		qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
3729 		qp->r_psn = e->lpsn + 1;
3730 		req->state = TID_REQUEST_INIT;
3731 		goto update_head;
3732 	}
3733 
3734 	release_rdma_sge_mr(e);
3735 
3736 	/* The length needs to be in multiples of PAGE_SIZE */
3737 	if (!len || len & ~PAGE_MASK)
3738 		goto nack_inv_unlock;
3739 
3740 	rkey = be32_to_cpu(reth->rkey);
3741 	qp->r_len = len;
3742 
3743 	if (e->opcode == TID_OP(WRITE_REQ) &&
3744 	    (req->setup_head != req->clear_tail ||
3745 	     req->clear_tail != req->acked_tail))
3746 		goto nack_inv_unlock;
3747 
3748 	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
3749 				  rkey, IB_ACCESS_REMOTE_WRITE)))
3750 		goto nack_acc;
3751 
3752 	qp->r_psn += num_segs - 1;
3753 
3754 	e->opcode = (bth0 >> 24) & 0xff;
3755 	e->psn = psn;
3756 	e->lpsn = qp->r_psn;
3757 	e->sent = 0;
3758 
3759 	req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3760 	req->state = TID_REQUEST_INIT;
3761 	req->cur_seg = 0;
3762 	req->comp_seg = 0;
3763 	req->ack_seg = 0;
3764 	req->alloc_seg = 0;
3765 	req->isge = 0;
3766 	req->seg_len = qpriv->tid_rdma.local.max_len;
3767 	req->total_len = len;
3768 	req->total_segs = num_segs;
3769 	req->r_flow_psn = e->psn;
3770 	req->ss.sge = e->rdma_sge;
3771 	req->ss.num_sge = 1;
3772 
3773 	req->flow_idx = req->setup_head;
3774 	req->clear_tail = req->setup_head;
3775 	req->acked_tail = req->setup_head;
3776 
3777 	qp->r_state = e->opcode;
3778 	qp->r_nak_state = 0;
3779 	/*
3780 	 * We need to increment the MSN here instead of when we
3781 	 * finish sending the result since a duplicate request would
3782 	 * increment it more than once.
3783 	 */
3784 	qp->r_msn++;
3785 	qp->r_psn++;
3786 
3787 	trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
3788 					 req);
3789 
3790 	if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
3791 		qpriv->r_tid_tail = qp->r_head_ack_queue;
3792 	} else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
3793 		struct tid_rdma_request *ptr;
3794 
3795 		e = &qp->s_ack_queue[qpriv->r_tid_tail];
3796 		ptr = ack_to_tid_req(e);
3797 
3798 		if (e->opcode != TID_OP(WRITE_REQ) ||
3799 		    ptr->comp_seg == ptr->total_segs) {
3800 			if (qpriv->r_tid_tail == qpriv->r_tid_ack)
3801 				qpriv->r_tid_ack = qp->r_head_ack_queue;
3802 			qpriv->r_tid_tail = qp->r_head_ack_queue;
3803 		}
3804 	}
3805 update_head:
3806 	qp->r_head_ack_queue = next;
3807 	qpriv->r_tid_head = qp->r_head_ack_queue;
3808 
3809 	hfi1_tid_write_alloc_resources(qp, true);
3810 	trace_hfi1_tid_write_rsp_rcv_req(qp);
3811 
3812 	/* Schedule the send tasklet. */
3813 	qp->s_flags |= RVT_S_RESP_PENDING;
3814 	if (fecn)
3815 		qp->s_flags |= RVT_S_ECN;
3816 	hfi1_schedule_send(qp);
3817 
3818 	spin_unlock_irqrestore(&qp->s_lock, flags);
3819 	return;
3820 
3821 nack_inv_unlock:
3822 	spin_unlock_irqrestore(&qp->s_lock, flags);
3823 nack_inv:
3824 	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3825 	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3826 	qp->r_ack_psn = qp->r_psn;
3827 	/* Queue NAK for later */
3828 	rc_defered_ack(rcd, qp);
3829 	return;
3830 nack_acc:
3831 	spin_unlock_irqrestore(&qp->s_lock, flags);
3832 	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3833 	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3834 	qp->r_ack_psn = qp->r_psn;
3835 }
3836 
3837 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
3838 				   struct ib_other_headers *ohdr, u32 *bth1,
3839 				   u32 bth2, u32 *len,
3840 				   struct rvt_sge_state **ss)
3841 {
3842 	struct hfi1_ack_priv *epriv = e->priv;
3843 	struct tid_rdma_request *req = &epriv->tid_req;
3844 	struct hfi1_qp_priv *qpriv = qp->priv;
3845 	struct tid_rdma_flow *flow = NULL;
3846 	u32 resp_len = 0, hdwords = 0;
3847 	void *resp_addr = NULL;
3848 	struct tid_rdma_params *remote;
3849 
3850 	trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
3851 					    req);
3852 	trace_hfi1_tid_write_rsp_build_resp(qp);
3853 	trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
3854 	flow = &req->flows[req->flow_idx];
3855 	switch (req->state) {
3856 	default:
3857 		/*
3858 		 * Try to allocate resources here in case QP was queued and was
3859 		 * later scheduled when resources became available
3860 		 */
3861 		hfi1_tid_write_alloc_resources(qp, false);
3862 
3863 		/* We've already sent everything which is ready */
3864 		if (req->cur_seg >= req->alloc_seg)
3865 			goto done;
3866 
3867 		/*
3868 		 * Resources can be assigned but responses cannot be sent in
3869 		 * rnr_nak state, till the resent request is received
3870 		 */
3871 		if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
3872 			goto done;
3873 
3874 		req->state = TID_REQUEST_ACTIVE;
3875 		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3876 		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3877 		hfi1_add_tid_reap_timer(qp);
3878 		break;
3879 
3880 	case TID_REQUEST_RESEND_ACTIVE:
3881 	case TID_REQUEST_RESEND:
3882 		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3883 		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3884 		if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
3885 			req->state = TID_REQUEST_ACTIVE;
3886 
3887 		hfi1_mod_tid_reap_timer(qp);
3888 		break;
3889 	}
3890 	flow->flow_state.resp_ib_psn = bth2;
3891 	resp_addr = (void *)flow->tid_entry;
3892 	resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
3893 	req->cur_seg++;
3894 
3895 	memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
3896 	epriv->ss.sge.vaddr = resp_addr;
3897 	epriv->ss.sge.sge_length = resp_len;
3898 	epriv->ss.sge.length = epriv->ss.sge.sge_length;
3899 	/*
3900 	 * We can safely zero these out. Since the first SGE covers the
3901 	 * entire packet, nothing else should even look at the MR.
3902 	 */
3903 	epriv->ss.sge.mr = NULL;
3904 	epriv->ss.sge.m = 0;
3905 	epriv->ss.sge.n = 0;
3906 
3907 	epriv->ss.sg_list = NULL;
3908 	epriv->ss.total_len = epriv->ss.sge.sge_length;
3909 	epriv->ss.num_sge = 1;
3910 
3911 	*ss = &epriv->ss;
3912 	*len = epriv->ss.total_len;
3913 
3914 	/* Construct the TID RDMA WRITE RESP packet header */
3915 	rcu_read_lock();
3916 	remote = rcu_dereference(qpriv->tid_rdma.remote);
3917 
3918 	KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
3919 	KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
3920 	ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
3921 	ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
3922 		cpu_to_be32((flow->flow_state.generation <<
3923 			     HFI1_KDETH_BTH_SEQ_SHIFT) |
3924 			    (flow->flow_state.spsn &
3925 			     HFI1_KDETH_BTH_SEQ_MASK));
3926 	ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
3927 		cpu_to_be32(qpriv->tid_rdma.local.qp |
3928 			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
3929 			     TID_RDMA_DESTQP_FLOW_SHIFT) |
3930 			    qpriv->rcd->ctxt);
3931 	ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
3932 	*bth1 = remote->qp;
3933 	rcu_read_unlock();
3934 	hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
3935 	qpriv->pending_tid_w_segs++;
3936 done:
3937 	return hdwords;
3938 }
3939 
3940 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
3941 {
3942 	struct hfi1_qp_priv *qpriv = qp->priv;
3943 
3944 	lockdep_assert_held(&qp->s_lock);
3945 	if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
3946 		qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3947 		qpriv->s_tid_timer.expires = jiffies +
3948 			qpriv->tid_timer_timeout_jiffies;
3949 		add_timer(&qpriv->s_tid_timer);
3950 	}
3951 }
3952 
3953 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
3954 {
3955 	struct hfi1_qp_priv *qpriv = qp->priv;
3956 
3957 	lockdep_assert_held(&qp->s_lock);
3958 	qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3959 	mod_timer(&qpriv->s_tid_timer, jiffies +
3960 		  qpriv->tid_timer_timeout_jiffies);
3961 }
3962 
3963 static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
3964 {
3965 	struct hfi1_qp_priv *qpriv = qp->priv;
3966 	int rval = 0;
3967 
3968 	lockdep_assert_held(&qp->s_lock);
3969 	if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
3970 		rval = del_timer(&qpriv->s_tid_timer);
3971 		qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
3972 	}
3973 	return rval;
3974 }
3975 
3976 void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
3977 {
3978 	struct hfi1_qp_priv *qpriv = qp->priv;
3979 
3980 	del_timer_sync(&qpriv->s_tid_timer);
3981 	qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
3982 }
3983 
3984 static void hfi1_tid_timeout(struct timer_list *t)
3985 {
3986 	struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
3987 	struct rvt_qp *qp = qpriv->owner;
3988 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
3989 	unsigned long flags;
3990 	u32 i;
3991 
3992 	spin_lock_irqsave(&qp->r_lock, flags);
3993 	spin_lock(&qp->s_lock);
3994 	if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
3995 		dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
3996 			    qp->ibqp.qp_num, __func__, __LINE__);
3997 		trace_hfi1_msg_tid_timeout(/* msg */
3998 			qp, "resource timeout = ",
3999 			(u64)qpriv->tid_timer_timeout_jiffies);
4000 		hfi1_stop_tid_reap_timer(qp);
4001 		/*
4002 		 * Go though the entire ack queue and clear any outstanding
4003 		 * HW flow and RcvArray resources.
4004 		 */
4005 		hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4006 		for (i = 0; i < rvt_max_atomic(rdi); i++) {
4007 			struct tid_rdma_request *req =
4008 				ack_to_tid_req(&qp->s_ack_queue[i]);
4009 
4010 			hfi1_kern_exp_rcv_clear_all(req);
4011 		}
4012 		spin_unlock(&qp->s_lock);
4013 		if (qp->ibqp.event_handler) {
4014 			struct ib_event ev;
4015 
4016 			ev.device = qp->ibqp.device;
4017 			ev.element.qp = &qp->ibqp;
4018 			ev.event = IB_EVENT_QP_FATAL;
4019 			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
4020 		}
4021 		rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
4022 		goto unlock_r_lock;
4023 	}
4024 	spin_unlock(&qp->s_lock);
4025 unlock_r_lock:
4026 	spin_unlock_irqrestore(&qp->r_lock, flags);
4027 }
4028 
4029 void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
4030 {
4031 	/* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */
4032 
4033 	/*
4034 	 * 1. Find matching SWQE
4035 	 * 2. Check that TIDENTRY array has enough space for a complete
4036 	 *    segment. If not, put QP in error state.
4037 	 * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
4038 	 * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
4039 	 * 5. Set qp->s_state
4040 	 * 6. Kick the send engine (hfi1_schedule_send())
4041 	 */
4042 	struct ib_other_headers *ohdr = packet->ohdr;
4043 	struct rvt_qp *qp = packet->qp;
4044 	struct hfi1_qp_priv *qpriv = qp->priv;
4045 	struct hfi1_ctxtdata *rcd = packet->rcd;
4046 	struct rvt_swqe *wqe;
4047 	struct tid_rdma_request *req;
4048 	struct tid_rdma_flow *flow;
4049 	enum ib_wc_status status;
4050 	u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
4051 	bool fecn;
4052 	unsigned long flags;
4053 
4054 	fecn = process_ecn(qp, packet);
4055 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4056 	aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
4057 	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
4058 
4059 	spin_lock_irqsave(&qp->s_lock, flags);
4060 
4061 	/* Ignore invalid responses */
4062 	if (cmp_psn(psn, qp->s_next_psn) >= 0)
4063 		goto ack_done;
4064 
4065 	/* Ignore duplicate responses. */
4066 	if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
4067 		goto ack_done;
4068 
4069 	if (unlikely(qp->s_acked == qp->s_tail))
4070 		goto ack_done;
4071 
4072 	/*
4073 	 * If we are waiting for a particular packet sequence number
4074 	 * due to a request being resent, check for it. Otherwise,
4075 	 * ensure that we haven't missed anything.
4076 	 */
4077 	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
4078 		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
4079 			goto ack_done;
4080 		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
4081 	}
4082 
4083 	wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4084 	if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
4085 		goto ack_op_err;
4086 
4087 	req = wqe_to_tid_req(wqe);
4088 	/*
4089 	 * If we've lost ACKs and our acked_tail pointer is too far
4090 	 * behind, don't overwrite segments. Just drop the packet and
4091 	 * let the reliability protocol take care of it.
4092 	 */
4093 	if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
4094 		goto ack_done;
4095 
4096 	/*
4097 	 * The call to do_rc_ack() should be last in the chain of
4098 	 * packet checks because it will end up updating the QP state.
4099 	 * Therefore, anything that would prevent the packet from
4100 	 * being accepted as a successful response should be prior
4101 	 * to it.
4102 	 */
4103 	if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4104 		goto ack_done;
4105 
4106 	trace_hfi1_ack(qp, psn);
4107 
4108 	flow = &req->flows[req->setup_head];
4109 	flow->pkt = 0;
4110 	flow->tid_idx = 0;
4111 	flow->tid_offset = 0;
4112 	flow->sent = 0;
4113 	flow->resync_npkts = 0;
4114 	flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
4115 	flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
4116 		TID_RDMA_DESTQP_FLOW_MASK;
4117 	flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
4118 	flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4119 	flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
4120 	flow->flow_state.resp_ib_psn = psn;
4121 	flow->length = min_t(u32, req->seg_len,
4122 			     (wqe->length - (req->comp_seg * req->seg_len)));
4123 
4124 	flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4125 	flow->flow_state.lpsn = flow->flow_state.spsn +
4126 		flow->npkts - 1;
4127 	/* payload length = packet length - (header length + ICRC length) */
4128 	pktlen = packet->tlen - (packet->hlen + 4);
4129 	if (pktlen > sizeof(flow->tid_entry)) {
4130 		status = IB_WC_LOC_LEN_ERR;
4131 		goto ack_err;
4132 	}
4133 	memcpy(flow->tid_entry, packet->ebuf, pktlen);
4134 	flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
4135 	trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4136 
4137 	req->comp_seg++;
4138 	trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
4139 	/*
4140 	 * Walk the TID_ENTRY list to make sure we have enough space for a
4141 	 * complete segment.
4142 	 */
4143 	for (i = 0; i < flow->tidcnt; i++) {
4144 		trace_hfi1_tid_entry_rcv_write_resp(/* entry */
4145 			qp, i, flow->tid_entry[i]);
4146 		if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
4147 			status = IB_WC_LOC_LEN_ERR;
4148 			goto ack_err;
4149 		}
4150 		tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
4151 	}
4152 	if (tidlen * PAGE_SIZE < flow->length) {
4153 		status = IB_WC_LOC_LEN_ERR;
4154 		goto ack_err;
4155 	}
4156 
4157 	trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4158 					  wqe->lpsn, req);
4159 	/*
4160 	 * If this is the first response for this request, set the initial
4161 	 * flow index to the current flow.
4162 	 */
4163 	if (!cmp_psn(psn, wqe->psn)) {
4164 		req->r_last_acked = mask_psn(wqe->psn - 1);
4165 		/* Set acked flow index to head index */
4166 		req->acked_tail = req->setup_head;
4167 	}
4168 
4169 	/* advance circular buffer head */
4170 	req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
4171 	req->state = TID_REQUEST_ACTIVE;
4172 
4173 	/*
4174 	 * If all responses for this TID RDMA WRITE request have been received
4175 	 * advance the pointer to the next one.
4176 	 * Since TID RDMA requests could be mixed in with regular IB requests,
4177 	 * they might not appear sequentially in the queue. Therefore, the
4178 	 * next request needs to be "found".
4179 	 */
4180 	if (qpriv->s_tid_cur != qpriv->s_tid_head &&
4181 	    req->comp_seg == req->total_segs) {
4182 		for (i = qpriv->s_tid_cur + 1; ; i++) {
4183 			if (i == qp->s_size)
4184 				i = 0;
4185 			wqe = rvt_get_swqe_ptr(qp, i);
4186 			if (i == qpriv->s_tid_head)
4187 				break;
4188 			if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
4189 				break;
4190 		}
4191 		qpriv->s_tid_cur = i;
4192 	}
4193 	qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
4194 	hfi1_schedule_tid_send(qp);
4195 	goto ack_done;
4196 
4197 ack_op_err:
4198 	status = IB_WC_LOC_QP_OP_ERR;
4199 ack_err:
4200 	rvt_error_qp(qp, status);
4201 ack_done:
4202 	if (fecn)
4203 		qp->s_flags |= RVT_S_ECN;
4204 	spin_unlock_irqrestore(&qp->s_lock, flags);
4205 }
4206 
4207 bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
4208 				struct ib_other_headers *ohdr,
4209 				u32 *bth1, u32 *bth2, u32 *len)
4210 {
4211 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4212 	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4213 	struct tid_rdma_params *remote;
4214 	struct rvt_qp *qp = req->qp;
4215 	struct hfi1_qp_priv *qpriv = qp->priv;
4216 	u32 tidentry = flow->tid_entry[flow->tid_idx];
4217 	u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
4218 	struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
4219 	u32 next_offset, om = KDETH_OM_LARGE;
4220 	bool last_pkt;
4221 
4222 	if (!tidlen) {
4223 		hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
4224 		rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
4225 	}
4226 
4227 	*len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
4228 	flow->sent += *len;
4229 	next_offset = flow->tid_offset + *len;
4230 	last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
4231 		    next_offset >= tidlen) || (flow->sent >= flow->length);
4232 	trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
4233 	trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4234 
4235 	rcu_read_lock();
4236 	remote = rcu_dereference(qpriv->tid_rdma.remote);
4237 	KDETH_RESET(wd->kdeth0, KVER, 0x1);
4238 	KDETH_SET(wd->kdeth0, SH, !last_pkt);
4239 	KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
4240 	KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
4241 	KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
4242 	KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
4243 	KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
4244 	KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
4245 	wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
4246 	rcu_read_unlock();
4247 
4248 	*bth1 = flow->tid_qpn;
4249 	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
4250 			 HFI1_KDETH_BTH_SEQ_MASK) |
4251 			 (flow->flow_state.generation <<
4252 			  HFI1_KDETH_BTH_SEQ_SHIFT));
4253 	if (last_pkt) {
4254 		/* PSNs are zero-based, so +1 to count number of packets */
4255 		if (flow->flow_state.lpsn + 1 +
4256 		    rvt_div_round_up_mtu(qp, req->seg_len) >
4257 		    MAX_TID_FLOW_PSN)
4258 			req->state = TID_REQUEST_SYNC;
4259 		*bth2 |= IB_BTH_REQ_ACK;
4260 	}
4261 
4262 	if (next_offset >= tidlen) {
4263 		flow->tid_offset = 0;
4264 		flow->tid_idx++;
4265 	} else {
4266 		flow->tid_offset = next_offset;
4267 	}
4268 	return last_pkt;
4269 }
4270 
4271 void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
4272 {
4273 	struct rvt_qp *qp = packet->qp;
4274 	struct hfi1_qp_priv *priv = qp->priv;
4275 	struct hfi1_ctxtdata *rcd = priv->rcd;
4276 	struct ib_other_headers *ohdr = packet->ohdr;
4277 	struct rvt_ack_entry *e;
4278 	struct tid_rdma_request *req;
4279 	struct tid_rdma_flow *flow;
4280 	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4281 	unsigned long flags;
4282 	u32 psn, next;
4283 	u8 opcode;
4284 	bool fecn;
4285 
4286 	fecn = process_ecn(qp, packet);
4287 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4288 	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
4289 
4290 	/*
4291 	 * All error handling should be done by now. If we are here, the packet
4292 	 * is either good or been accepted by the error handler.
4293 	 */
4294 	spin_lock_irqsave(&qp->s_lock, flags);
4295 	e = &qp->s_ack_queue[priv->r_tid_tail];
4296 	req = ack_to_tid_req(e);
4297 	flow = &req->flows[req->clear_tail];
4298 	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
4299 		update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4300 
4301 		if (cmp_psn(psn, flow->flow_state.r_next_psn))
4302 			goto send_nak;
4303 
4304 		flow->flow_state.r_next_psn = mask_psn(psn + 1);
4305 		/*
4306 		 * Copy the payload to destination buffer if this packet is
4307 		 * delivered as an eager packet due to RSM rule and FECN.
4308 		 * The RSM rule selects FECN bit in BTH and SH bit in
4309 		 * KDETH header and therefore will not match the last
4310 		 * packet of each segment that has SH bit cleared.
4311 		 */
4312 		if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
4313 			struct rvt_sge_state ss;
4314 			u32 len;
4315 			u32 tlen = packet->tlen;
4316 			u16 hdrsize = packet->hlen;
4317 			u8 pad = packet->pad;
4318 			u8 extra_bytes = pad + packet->extra_byte +
4319 				(SIZE_OF_CRC << 2);
4320 			u32 pmtu = qp->pmtu;
4321 
4322 			if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
4323 				goto send_nak;
4324 			len = req->comp_seg * req->seg_len;
4325 			len += delta_psn(psn,
4326 				full_flow_psn(flow, flow->flow_state.spsn)) *
4327 				pmtu;
4328 			if (unlikely(req->total_len - len < pmtu))
4329 				goto send_nak;
4330 
4331 			/*
4332 			 * The e->rdma_sge field is set when TID RDMA WRITE REQ
4333 			 * is first received and is never modified thereafter.
4334 			 */
4335 			ss.sge = e->rdma_sge;
4336 			ss.sg_list = NULL;
4337 			ss.num_sge = 1;
4338 			ss.total_len = req->total_len;
4339 			rvt_skip_sge(&ss, len, false);
4340 			rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
4341 				     false);
4342 			/* Raise the sw sequence check flag for next packet */
4343 			priv->r_next_psn_kdeth = mask_psn(psn + 1);
4344 			priv->s_flags |= HFI1_R_TID_SW_PSN;
4345 		}
4346 		goto exit;
4347 	}
4348 	flow->flow_state.r_next_psn = mask_psn(psn + 1);
4349 	hfi1_kern_exp_rcv_clear(req);
4350 	priv->alloc_w_segs--;
4351 	rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4352 	req->comp_seg++;
4353 	priv->s_nak_state = 0;
4354 
4355 	/*
4356 	 * Release the flow if one of the following conditions has been met:
4357 	 *  - The request has reached a sync point AND all outstanding
4358 	 *    segments have been completed, or
4359 	 *  - The entire request is complete and there are no more requests
4360 	 *    (of any kind) in the queue.
4361 	 */
4362 	trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
4363 	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
4364 					  req);
4365 	trace_hfi1_tid_write_rsp_rcv_data(qp);
4366 	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
4367 		priv->r_tid_ack = priv->r_tid_tail;
4368 
4369 	if (opcode == TID_OP(WRITE_DATA_LAST)) {
4370 		release_rdma_sge_mr(e);
4371 		for (next = priv->r_tid_tail + 1; ; next++) {
4372 			if (next > rvt_size_atomic(&dev->rdi))
4373 				next = 0;
4374 			if (next == priv->r_tid_head)
4375 				break;
4376 			e = &qp->s_ack_queue[next];
4377 			if (e->opcode == TID_OP(WRITE_REQ))
4378 				break;
4379 		}
4380 		priv->r_tid_tail = next;
4381 		if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
4382 			qp->s_acked_ack_queue = 0;
4383 	}
4384 
4385 	hfi1_tid_write_alloc_resources(qp, true);
4386 
4387 	/*
4388 	 * If we need to generate more responses, schedule the
4389 	 * send engine.
4390 	 */
4391 	if (req->cur_seg < req->total_segs ||
4392 	    qp->s_tail_ack_queue != qp->r_head_ack_queue) {
4393 		qp->s_flags |= RVT_S_RESP_PENDING;
4394 		hfi1_schedule_send(qp);
4395 	}
4396 
4397 	priv->pending_tid_w_segs--;
4398 	if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
4399 		if (priv->pending_tid_w_segs)
4400 			hfi1_mod_tid_reap_timer(req->qp);
4401 		else
4402 			hfi1_stop_tid_reap_timer(req->qp);
4403 	}
4404 
4405 done:
4406 	priv->s_flags |= RVT_S_ACK_PENDING;
4407 	hfi1_schedule_tid_send(qp);
4408 exit:
4409 	priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
4410 	if (fecn)
4411 		qp->s_flags |= RVT_S_ECN;
4412 	spin_unlock_irqrestore(&qp->s_lock, flags);
4413 	return;
4414 
4415 send_nak:
4416 	if (!priv->s_nak_state) {
4417 		priv->s_nak_state = IB_NAK_PSN_ERROR;
4418 		priv->s_nak_psn = flow->flow_state.r_next_psn;
4419 		priv->s_flags |= RVT_S_ACK_PENDING;
4420 		if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
4421 			priv->r_tid_ack = priv->r_tid_tail;
4422 		hfi1_schedule_tid_send(qp);
4423 	}
4424 	goto done;
4425 }
4426 
4427 static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
4428 {
4429 	return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
4430 		      HFI1_KDETH_BTH_SEQ_MASK);
4431 }
4432 
4433 u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
4434 				  struct ib_other_headers *ohdr, u16 iflow,
4435 				  u32 *bth1, u32 *bth2)
4436 {
4437 	struct hfi1_qp_priv *qpriv = qp->priv;
4438 	struct tid_flow_state *fs = &qpriv->flow_state;
4439 	struct tid_rdma_request *req = ack_to_tid_req(e);
4440 	struct tid_rdma_flow *flow = &req->flows[iflow];
4441 	struct tid_rdma_params *remote;
4442 
4443 	rcu_read_lock();
4444 	remote = rcu_dereference(qpriv->tid_rdma.remote);
4445 	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
4446 	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4447 	*bth1 = remote->qp;
4448 	rcu_read_unlock();
4449 
4450 	if (qpriv->resync) {
4451 		*bth2 = mask_psn((fs->generation <<
4452 				  HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
4453 		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4454 	} else if (qpriv->s_nak_state) {
4455 		*bth2 = mask_psn(qpriv->s_nak_psn);
4456 		ohdr->u.tid_rdma.ack.aeth =
4457 			cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
4458 				    (qpriv->s_nak_state <<
4459 				     IB_AETH_CREDIT_SHIFT));
4460 	} else {
4461 		*bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
4462 		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4463 	}
4464 	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
4465 	ohdr->u.tid_rdma.ack.tid_flow_qp =
4466 		cpu_to_be32(qpriv->tid_rdma.local.qp |
4467 			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
4468 			     TID_RDMA_DESTQP_FLOW_SHIFT) |
4469 			    qpriv->rcd->ctxt);
4470 
4471 	ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
4472 	ohdr->u.tid_rdma.ack.verbs_psn =
4473 		cpu_to_be32(flow->flow_state.resp_ib_psn);
4474 
4475 	if (qpriv->resync) {
4476 		/*
4477 		 * If the PSN before the current expect KDETH PSN is the
4478 		 * RESYNC PSN, then we never received a good TID RDMA WRITE
4479 		 * DATA packet after a previous RESYNC.
4480 		 * In this case, the next expected KDETH PSN stays the same.
4481 		 */
4482 		if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
4483 			ohdr->u.tid_rdma.ack.tid_flow_psn =
4484 				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4485 		} else {
4486 			/*
4487 			 * Because the KDETH PSNs jump during a RESYNC, it's
4488 			 * not possible to infer (or compute) the previous value
4489 			 * of r_next_psn_kdeth in the case of back-to-back
4490 			 * RESYNC packets. Therefore, we save it.
4491 			 */
4492 			qpriv->r_next_psn_kdeth_save =
4493 				qpriv->r_next_psn_kdeth - 1;
4494 			ohdr->u.tid_rdma.ack.tid_flow_psn =
4495 				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4496 			qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
4497 		}
4498 		qpriv->resync = false;
4499 	}
4500 
4501 	return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
4502 }
4503 
4504 void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4505 {
4506 	struct ib_other_headers *ohdr = packet->ohdr;
4507 	struct rvt_qp *qp = packet->qp;
4508 	struct hfi1_qp_priv *qpriv = qp->priv;
4509 	struct rvt_swqe *wqe;
4510 	struct tid_rdma_request *req;
4511 	struct tid_rdma_flow *flow;
4512 	u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
4513 	unsigned long flags;
4514 	u16 fidx;
4515 
4516 	trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
4517 	process_ecn(qp, packet);
4518 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4519 	aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
4520 	req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
4521 	resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));
4522 
4523 	spin_lock_irqsave(&qp->s_lock, flags);
4524 	trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
4525 
4526 	/* If we are waiting for an ACK to RESYNC, drop any other packets */
4527 	if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
4528 	    cmp_psn(psn, qpriv->s_resync_psn))
4529 		goto ack_op_err;
4530 
4531 	ack_psn = req_psn;
4532 	if (hfi1_tid_rdma_is_resync_psn(psn))
4533 		ack_kpsn = resync_psn;
4534 	else
4535 		ack_kpsn = psn;
4536 	if (aeth >> 29) {
4537 		ack_psn--;
4538 		ack_kpsn--;
4539 	}
4540 
4541 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4542 
4543 	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4544 		goto ack_op_err;
4545 
4546 	req = wqe_to_tid_req(wqe);
4547 	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4548 				       wqe->lpsn, req);
4549 	flow = &req->flows[req->acked_tail];
4550 	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4551 
4552 	/* Drop stale ACK/NAK */
4553 	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
4554 		goto ack_op_err;
4555 
4556 	while (cmp_psn(ack_kpsn,
4557 		       full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
4558 	       req->ack_seg < req->cur_seg) {
4559 		req->ack_seg++;
4560 		/* advance acked segment pointer */
4561 		req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
4562 		req->r_last_acked = flow->flow_state.resp_ib_psn;
4563 		trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4564 					       wqe->lpsn, req);
4565 		if (req->ack_seg == req->total_segs) {
4566 			req->state = TID_REQUEST_COMPLETE;
4567 			wqe = do_rc_completion(qp, wqe,
4568 					       to_iport(qp->ibqp.device,
4569 							qp->port_num));
4570 			trace_hfi1_sender_rcv_tid_ack(qp);
4571 			atomic_dec(&qpriv->n_tid_requests);
4572 			if (qp->s_acked == qp->s_tail)
4573 				break;
4574 			if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4575 				break;
4576 			req = wqe_to_tid_req(wqe);
4577 		}
4578 		flow = &req->flows[req->acked_tail];
4579 		trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4580 	}
4581 
4582 	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4583 				       wqe->lpsn, req);
4584 	switch (aeth >> 29) {
4585 	case 0:         /* ACK */
4586 		if (qpriv->s_flags & RVT_S_WAIT_ACK)
4587 			qpriv->s_flags &= ~RVT_S_WAIT_ACK;
4588 		if (!hfi1_tid_rdma_is_resync_psn(psn)) {
4589 			/* Check if there is any pending TID ACK */
4590 			if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
4591 			    req->ack_seg < req->cur_seg)
4592 				hfi1_mod_tid_retry_timer(qp);
4593 			else
4594 				hfi1_stop_tid_retry_timer(qp);
4595 			hfi1_schedule_send(qp);
4596 		} else {
4597 			u32 spsn, fpsn, last_acked, generation;
4598 			struct tid_rdma_request *rptr;
4599 
4600 			/* ACK(RESYNC) */
4601 			hfi1_stop_tid_retry_timer(qp);
4602 			/* Allow new requests (see hfi1_make_tid_rdma_pkt) */
4603 			qp->s_flags &= ~HFI1_S_WAIT_HALT;
4604 			/*
4605 			 * Clear RVT_S_SEND_ONE flag in case that the TID RDMA
4606 			 * ACK is received after the TID retry timer is fired
4607 			 * again. In this case, do not send any more TID
4608 			 * RESYNC request or wait for any more TID ACK packet.
4609 			 */
4610 			qpriv->s_flags &= ~RVT_S_SEND_ONE;
4611 			hfi1_schedule_send(qp);
4612 
4613 			if ((qp->s_acked == qpriv->s_tid_tail &&
4614 			     req->ack_seg == req->total_segs) ||
4615 			    qp->s_acked == qp->s_tail) {
4616 				qpriv->s_state = TID_OP(WRITE_DATA_LAST);
4617 				goto done;
4618 			}
4619 
4620 			if (req->ack_seg == req->comp_seg) {
4621 				qpriv->s_state = TID_OP(WRITE_DATA);
4622 				goto done;
4623 			}
4624 
4625 			/*
4626 			 * The PSN to start with is the next PSN after the
4627 			 * RESYNC PSN.
4628 			 */
4629 			psn = mask_psn(psn + 1);
4630 			generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4631 			spsn = 0;
4632 
4633 			/*
4634 			 * Update to the correct WQE when we get an ACK(RESYNC)
4635 			 * in the middle of a request.
4636 			 */
4637 			if (delta_psn(ack_psn, wqe->lpsn))
4638 				wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4639 			req = wqe_to_tid_req(wqe);
4640 			flow = &req->flows[req->acked_tail];
4641 			/*
4642 			 * RESYNC re-numbers the PSN ranges of all remaining
4643 			 * segments. Also, PSN's start from 0 in the middle of a
4644 			 * segment and the first segment size is less than the
4645 			 * default number of packets. flow->resync_npkts is used
4646 			 * to track the number of packets from the start of the
4647 			 * real segment to the point of 0 PSN after the RESYNC
4648 			 * in order to later correctly rewind the SGE.
4649 			 */
4650 			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
4651 			req->r_ack_psn = psn;
4652 			flow->resync_npkts +=
4653 				delta_psn(mask_psn(resync_psn + 1), fpsn);
4654 			/*
4655 			 * Renumber all packet sequence number ranges
4656 			 * based on the new generation.
4657 			 */
4658 			last_acked = qp->s_acked;
4659 			rptr = req;
4660 			while (1) {
4661 				/* start from last acked segment */
4662 				for (fidx = rptr->acked_tail;
4663 				     CIRC_CNT(rptr->setup_head, fidx,
4664 					      MAX_FLOWS);
4665 				     fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
4666 					u32 lpsn;
4667 					u32 gen;
4668 
4669 					flow = &rptr->flows[fidx];
4670 					gen = flow->flow_state.generation;
4671 					if (WARN_ON(gen == generation &&
4672 						    flow->flow_state.spsn !=
4673 						     spsn))
4674 						continue;
4675 					lpsn = flow->flow_state.lpsn;
4676 					lpsn = full_flow_psn(flow, lpsn);
4677 					flow->npkts =
4678 						delta_psn(lpsn,
4679 							  mask_psn(resync_psn)
4680 							  );
4681 					flow->flow_state.generation =
4682 						generation;
4683 					flow->flow_state.spsn = spsn;
4684 					flow->flow_state.lpsn =
4685 						flow->flow_state.spsn +
4686 						flow->npkts - 1;
4687 					flow->pkt = 0;
4688 					spsn += flow->npkts;
4689 					resync_psn += flow->npkts;
4690 					trace_hfi1_tid_flow_rcv_tid_ack(qp,
4691 									fidx,
4692 									flow);
4693 				}
4694 				if (++last_acked == qpriv->s_tid_cur + 1)
4695 					break;
4696 				if (last_acked == qp->s_size)
4697 					last_acked = 0;
4698 				wqe = rvt_get_swqe_ptr(qp, last_acked);
4699 				rptr = wqe_to_tid_req(wqe);
4700 			}
4701 			req->cur_seg = req->ack_seg;
4702 			qpriv->s_tid_tail = qp->s_acked;
4703 			qpriv->s_state = TID_OP(WRITE_REQ);
4704 			hfi1_schedule_tid_send(qp);
4705 		}
4706 done:
4707 		qpriv->s_retry = qp->s_retry_cnt;
4708 		break;
4709 
4710 	case 3:         /* NAK */
4711 		hfi1_stop_tid_retry_timer(qp);
4712 		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4713 			IB_AETH_CREDIT_MASK) {
4714 		case 0: /* PSN sequence error */
4715 			flow = &req->flows[req->acked_tail];
4716 			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 							flow);
4718 			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4719 			req->cur_seg = req->ack_seg;
4720 			qpriv->s_tid_tail = qp->s_acked;
4721 			qpriv->s_state = TID_OP(WRITE_REQ);
4722 			qpriv->s_retry = qp->s_retry_cnt;
4723 			hfi1_schedule_tid_send(qp);
4724 			break;
4725 
4726 		default:
4727 			break;
4728 		}
4729 		break;
4730 
4731 	default:
4732 		break;
4733 	}
4734 
4735 ack_op_err:
4736 	spin_unlock_irqrestore(&qp->s_lock, flags);
4737 }
4738 
4739 void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
4740 {
4741 	struct hfi1_qp_priv *priv = qp->priv;
4742 	struct ib_qp *ibqp = &qp->ibqp;
4743 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
4744 
4745 	lockdep_assert_held(&qp->s_lock);
4746 	if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
4747 		priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
4748 		priv->s_tid_retry_timer.expires = jiffies +
4749 			priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
4750 		add_timer(&priv->s_tid_retry_timer);
4751 	}
4752 }
4753 
4754 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
4755 {
4756 	struct hfi1_qp_priv *priv = qp->priv;
4757 	struct ib_qp *ibqp = &qp->ibqp;
4758 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
4759 
4760 	lockdep_assert_held(&qp->s_lock);
4761 	priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
4762 	mod_timer(&priv->s_tid_retry_timer, jiffies +
4763 		  priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
4764 }
4765 
4766 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
4767 {
4768 	struct hfi1_qp_priv *priv = qp->priv;
4769 	int rval = 0;
4770 
4771 	lockdep_assert_held(&qp->s_lock);
4772 	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
4773 		rval = del_timer(&priv->s_tid_retry_timer);
4774 		priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
4775 	}
4776 	return rval;
4777 }
4778 
4779 void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
4780 {
4781 	struct hfi1_qp_priv *priv = qp->priv;
4782 
4783 	del_timer_sync(&priv->s_tid_retry_timer);
4784 	priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
4785 }
4786 
4787 static void hfi1_tid_retry_timeout(struct timer_list *t)
4788 {
4789 	struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
4790 	struct rvt_qp *qp = priv->owner;
4791 	struct rvt_swqe *wqe;
4792 	unsigned long flags;
4793 	struct tid_rdma_request *req;
4794 
4795 	spin_lock_irqsave(&qp->r_lock, flags);
4796 	spin_lock(&qp->s_lock);
4797 	trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
4798 	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
4799 		hfi1_stop_tid_retry_timer(qp);
4800 		if (!priv->s_retry) {
4801 			trace_hfi1_msg_tid_retry_timeout(/* msg */
4802 				qp,
4803 				"Exhausted retries. Tid retry timeout = ",
4804 				(u64)priv->tid_retry_timeout_jiffies);
4805 
4806 			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4807 			hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
4808 			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
4809 		} else {
4810 			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4811 			req = wqe_to_tid_req(wqe);
4812 			trace_hfi1_tid_req_tid_retry_timeout(/* req */
4813 			   qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4814 
4815 			priv->s_flags &= ~RVT_S_WAIT_ACK;
4816 			/* Only send one packet (the RESYNC) */
4817 			priv->s_flags |= RVT_S_SEND_ONE;
4818 			/*
4819 			 * No additional request shall be made by this QP until
4820 			 * the RESYNC has been complete.
4821 			 */
4822 			qp->s_flags |= HFI1_S_WAIT_HALT;
4823 			priv->s_state = TID_OP(RESYNC);
4824 			priv->s_retry--;
4825 			hfi1_schedule_tid_send(qp);
4826 		}
4827 	}
4828 	spin_unlock(&qp->s_lock);
4829 	spin_unlock_irqrestore(&qp->r_lock, flags);
4830 }
4831 
4832 u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
4833 			       struct ib_other_headers *ohdr, u32 *bth1,
4834 			       u32 *bth2, u16 fidx)
4835 {
4836 	struct hfi1_qp_priv *qpriv = qp->priv;
4837 	struct tid_rdma_params *remote;
4838 	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4839 	struct tid_rdma_flow *flow = &req->flows[fidx];
4840 	u32 generation;
4841 
4842 	rcu_read_lock();
4843 	remote = rcu_dereference(qpriv->tid_rdma.remote);
4844 	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
4845 	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4846 	*bth1 = remote->qp;
4847 	rcu_read_unlock();
4848 
4849 	generation = kern_flow_generation_next(flow->flow_state.generation);
4850 	*bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
4851 	qpriv->s_resync_psn = *bth2;
4852 	*bth2 |= IB_BTH_REQ_ACK;
4853 	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
4854 
4855 	return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
4856 }
4857 
4858 void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
4859 {
4860 	struct ib_other_headers *ohdr = packet->ohdr;
4861 	struct rvt_qp *qp = packet->qp;
4862 	struct hfi1_qp_priv *qpriv = qp->priv;
4863 	struct hfi1_ctxtdata *rcd = qpriv->rcd;
4864 	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4865 	struct rvt_ack_entry *e;
4866 	struct tid_rdma_request *req;
4867 	struct tid_rdma_flow *flow;
4868 	struct tid_flow_state *fs = &qpriv->flow_state;
4869 	u32 psn, generation, idx, gen_next;
4870 	bool fecn;
4871 	unsigned long flags;
4872 
4873 	fecn = process_ecn(qp, packet);
4874 	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4875 
4876 	generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
4877 	spin_lock_irqsave(&qp->s_lock, flags);
4878 
4879 	gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
4880 		generation : kern_flow_generation_next(fs->generation);
4881 	/*
4882 	 * RESYNC packet contains the "next" generation and can only be
4883 	 * from the current or previous generations
4884 	 */
4885 	if (generation != mask_generation(gen_next - 1) &&
4886 	    generation != gen_next)
4887 		goto bail;
4888 	/* Already processing a resync */
4889 	if (qpriv->resync)
4890 		goto bail;
4891 
4892 	spin_lock(&rcd->exp_lock);
4893 	if (fs->index >= RXE_NUM_TID_FLOWS) {
4894 		/*
4895 		 * If we don't have a flow, save the generation so it can be
4896 		 * applied when a new flow is allocated
4897 		 */
4898 		fs->generation = generation;
4899 	} else {
4900 		/* Reprogram the QP flow with new generation */
4901 		rcd->flows[fs->index].generation = generation;
4902 		fs->generation = kern_setup_hw_flow(rcd, fs->index);
4903 	}
4904 	fs->psn = 0;
4905 	/*
4906 	 * Disable SW PSN checking since a RESYNC is equivalent to a
4907 	 * sync point and the flow has/will be reprogrammed
4908 	 */
4909 	qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
4910 	trace_hfi1_tid_write_rsp_rcv_resync(qp);
4911 
4912 	/*
4913 	 * Reset all TID flow information with the new generation.
4914 	 * This is done for all requests and segments after the
4915 	 * last received segment
4916 	 */
4917 	for (idx = qpriv->r_tid_tail; ; idx++) {
4918 		u16 flow_idx;
4919 
4920 		if (idx > rvt_size_atomic(&dev->rdi))
4921 			idx = 0;
4922 		e = &qp->s_ack_queue[idx];
4923 		if (e->opcode == TID_OP(WRITE_REQ)) {
4924 			req = ack_to_tid_req(e);
4925 			trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
4926 						      e->lpsn, req);
4927 
4928 			/* start from last unacked segment */
4929 			for (flow_idx = req->clear_tail;
4930 			     CIRC_CNT(req->setup_head, flow_idx,
4931 				      MAX_FLOWS);
4932 			     flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
4933 				u32 lpsn;
4934 				u32 next;
4935 
4936 				flow = &req->flows[flow_idx];
4937 				lpsn = full_flow_psn(flow,
4938 						     flow->flow_state.lpsn);
4939 				next = flow->flow_state.r_next_psn;
4940 				flow->npkts = delta_psn(lpsn, next - 1);
4941 				flow->flow_state.generation = fs->generation;
4942 				flow->flow_state.spsn = fs->psn;
4943 				flow->flow_state.lpsn =
4944 					flow->flow_state.spsn + flow->npkts - 1;
4945 				flow->flow_state.r_next_psn =
4946 					full_flow_psn(flow,
4947 						      flow->flow_state.spsn);
4948 				fs->psn += flow->npkts;
4949 				trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
4950 							       flow);
4951 			}
4952 		}
4953 		if (idx == qp->s_tail_ack_queue)
4954 			break;
4955 	}
4956 
4957 	spin_unlock(&rcd->exp_lock);
4958 	qpriv->resync = true;
4959 	/* RESYNC request always gets a TID RDMA ACK. */
4960 	qpriv->s_nak_state = 0;
4961 	qpriv->s_flags |= RVT_S_ACK_PENDING;
4962 	hfi1_schedule_tid_send(qp);
4963 bail:
4964 	if (fecn)
4965 		qp->s_flags |= RVT_S_ECN;
4966 	spin_unlock_irqrestore(&qp->s_lock, flags);
4967 }
4968 
4969 /*
4970  * Call this function when the last TID RDMA WRITE DATA packet for a request
4971  * is built.
4972  */
4973 static void update_tid_tail(struct rvt_qp *qp)
4974 	__must_hold(&qp->s_lock)
4975 {
4976 	struct hfi1_qp_priv *priv = qp->priv;
4977 	u32 i;
4978 	struct rvt_swqe *wqe;
4979 
4980 	lockdep_assert_held(&qp->s_lock);
4981 	/* Can't move beyond s_tid_cur */
4982 	if (priv->s_tid_tail == priv->s_tid_cur)
4983 		return;
4984 	for (i = priv->s_tid_tail + 1; ; i++) {
4985 		if (i == qp->s_size)
4986 			i = 0;
4987 
4988 		if (i == priv->s_tid_cur)
4989 			break;
4990 		wqe = rvt_get_swqe_ptr(qp, i);
4991 		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
4992 			break;
4993 	}
4994 	priv->s_tid_tail = i;
4995 	priv->s_state = TID_OP(WRITE_RESP);
4996 }
4997 
4998 int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
4999 	__must_hold(&qp->s_lock)
5000 {
5001 	struct hfi1_qp_priv *priv = qp->priv;
5002 	struct rvt_swqe *wqe;
5003 	u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
5004 	struct ib_other_headers *ohdr;
5005 	struct rvt_sge_state *ss = &qp->s_sge;
5006 	struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
5007 	struct tid_rdma_request *req = ack_to_tid_req(e);
5008 	bool last = false;
5009 	u8 opcode = TID_OP(WRITE_DATA);
5010 
5011 	lockdep_assert_held(&qp->s_lock);
5012 	trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5013 	/*
5014 	 * Prioritize the sending of the requests and responses over the
5015 	 * sending of the TID RDMA data packets.
5016 	 */
5017 	if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
5018 	     atomic_read(&priv->n_requests) &&
5019 	     !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
5020 			     HFI1_S_ANY_WAIT_IO))) ||
5021 	    (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
5022 	     !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
5023 		struct iowait_work *iowork;
5024 
5025 		iowork = iowait_get_ib_work(&priv->s_iowait);
5026 		ps->s_txreq = get_waiting_verbs_txreq(iowork);
5027 		if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
5028 			priv->s_flags |= HFI1_S_TID_BUSY_SET;
5029 			return 1;
5030 		}
5031 	}
5032 
5033 	ps->s_txreq = get_txreq(ps->dev, qp);
5034 	if (!ps->s_txreq)
5035 		goto bail_no_tx;
5036 
5037 	ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
5038 
5039 	if ((priv->s_flags & RVT_S_ACK_PENDING) &&
5040 	    make_tid_rdma_ack(qp, ohdr, ps))
5041 		return 1;
5042 
5043 	/*
5044 	 * Bail out if we can't send data.
5045 	 * Be reminded that this check must been done after the call to
5046 	 * make_tid_rdma_ack() because the responding QP could be in
5047 	 * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
5048 	 */
5049 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
5050 		goto bail;
5051 
5052 	if (priv->s_flags & RVT_S_WAIT_ACK)
5053 		goto bail;
5054 
5055 	/* Check whether there is anything to do. */
5056 	if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
5057 		goto bail;
5058 	wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5059 	req = wqe_to_tid_req(wqe);
5060 	trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
5061 					wqe->lpsn, req);
5062 	switch (priv->s_state) {
5063 	case TID_OP(WRITE_REQ):
5064 	case TID_OP(WRITE_RESP):
5065 		priv->tid_ss.sge = wqe->sg_list[0];
5066 		priv->tid_ss.sg_list = wqe->sg_list + 1;
5067 		priv->tid_ss.num_sge = wqe->wr.num_sge;
5068 		priv->tid_ss.total_len = wqe->length;
5069 
5070 		if (priv->s_state == TID_OP(WRITE_REQ))
5071 			hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
5072 		priv->s_state = TID_OP(WRITE_DATA);
5073 		/* fall through */
5074 
5075 	case TID_OP(WRITE_DATA):
5076 		/*
5077 		 * 1. Check whether TID RDMA WRITE RESP available.
5078 		 * 2. If no:
5079 		 *    2.1 If have more segments and no TID RDMA WRITE RESP,
5080 		 *        set HFI1_S_WAIT_TID_RESP
5081 		 *    2.2 Return indicating no progress made.
5082 		 * 3. If yes:
5083 		 *    3.1 Build TID RDMA WRITE DATA packet.
5084 		 *    3.2 If last packet in segment:
5085 		 *        3.2.1 Change KDETH header bits
5086 		 *        3.2.2 Advance RESP pointers.
5087 		 *    3.3 Return indicating progress made.
5088 		 */
5089 		trace_hfi1_sender_make_tid_pkt(qp);
5090 		trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5091 		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5092 		req = wqe_to_tid_req(wqe);
5093 		len = wqe->length;
5094 
5095 		if (!req->comp_seg || req->cur_seg == req->comp_seg)
5096 			goto bail;
5097 
5098 		trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
5099 						wqe->psn, wqe->lpsn, req);
5100 		last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
5101 						  &len);
5102 
5103 		if (last) {
5104 			/* move pointer to next flow */
5105 			req->clear_tail = CIRC_NEXT(req->clear_tail,
5106 						    MAX_FLOWS);
5107 			if (++req->cur_seg < req->total_segs) {
5108 				if (!CIRC_CNT(req->setup_head, req->clear_tail,
5109 					      MAX_FLOWS))
5110 					qp->s_flags |= HFI1_S_WAIT_TID_RESP;
5111 			} else {
5112 				priv->s_state = TID_OP(WRITE_DATA_LAST);
5113 				opcode = TID_OP(WRITE_DATA_LAST);
5114 
5115 				/* Advance the s_tid_tail now */
5116 				update_tid_tail(qp);
5117 			}
5118 		}
5119 		hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
5120 		ss = &priv->tid_ss;
5121 		break;
5122 
5123 	case TID_OP(RESYNC):
5124 		trace_hfi1_sender_make_tid_pkt(qp);
5125 		/* Use generation from the most recently received response */
5126 		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
5127 		req = wqe_to_tid_req(wqe);
5128 		/* If no responses for this WQE look at the previous one */
5129 		if (!req->comp_seg) {
5130 			wqe = rvt_get_swqe_ptr(qp,
5131 					       (!priv->s_tid_cur ? qp->s_size :
5132 						priv->s_tid_cur) - 1);
5133 			req = wqe_to_tid_req(wqe);
5134 		}
5135 		hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
5136 						     &bth2,
5137 						     CIRC_PREV(req->setup_head,
5138 							       MAX_FLOWS));
5139 		ss = NULL;
5140 		len = 0;
5141 		opcode = TID_OP(RESYNC);
5142 		break;
5143 
5144 	default:
5145 		goto bail;
5146 	}
5147 	if (priv->s_flags & RVT_S_SEND_ONE) {
5148 		priv->s_flags &= ~RVT_S_SEND_ONE;
5149 		priv->s_flags |= RVT_S_WAIT_ACK;
5150 		bth2 |= IB_BTH_REQ_ACK;
5151 	}
5152 	qp->s_len -= len;
5153 	ps->s_txreq->hdr_dwords = hwords;
5154 	ps->s_txreq->sde = priv->s_sde;
5155 	ps->s_txreq->ss = ss;
5156 	ps->s_txreq->s_cur_size = len;
5157 	hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
5158 			     middle, ps);
5159 	return 1;
5160 bail:
5161 	hfi1_put_txreq(ps->s_txreq);
5162 bail_no_tx:
5163 	ps->s_txreq = NULL;
5164 	priv->s_flags &= ~RVT_S_BUSY;
5165 	/*
5166 	 * If we didn't get a txreq, the QP will be woken up later to try
5167 	 * again, set the flags to the the wake up which work item to wake
5168 	 * up.
5169 	 * (A better algorithm should be found to do this and generalize the
5170 	 * sleep/wakeup flags.)
5171 	 */
5172 	iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
5173 	return 0;
5174 }
5175 
5176 static int make_tid_rdma_ack(struct rvt_qp *qp,
5177 			     struct ib_other_headers *ohdr,
5178 			     struct hfi1_pkt_state *ps)
5179 {
5180 	struct rvt_ack_entry *e;
5181 	struct hfi1_qp_priv *qpriv = qp->priv;
5182 	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5183 	u32 hwords, next;
5184 	u32 len = 0;
5185 	u32 bth1 = 0, bth2 = 0;
5186 	int middle = 0;
5187 	u16 flow;
5188 	struct tid_rdma_request *req, *nreq;
5189 
5190 	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5191 	/* Don't send an ACK if we aren't supposed to. */
5192 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
5193 		goto bail;
5194 
5195 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
5196 	hwords = 5;
5197 
5198 	e = &qp->s_ack_queue[qpriv->r_tid_ack];
5199 	req = ack_to_tid_req(e);
5200 	/*
5201 	 * In the RESYNC case, we are exactly one segment past the
5202 	 * previously sent ack or at the previously sent NAK. So to send
5203 	 * the resync ack, we go back one segment (which might be part of
5204 	 * the previous request) and let the do-while loop execute again.
5205 	 * The advantage of executing the do-while loop is that any data
5206 	 * received after the previous ack is automatically acked in the
5207 	 * RESYNC ack. It turns out that for the do-while loop we only need
5208 	 * to pull back qpriv->r_tid_ack, not the segment
5209 	 * indices/counters. The scheme works even if the previous request
5210 	 * was not a TID WRITE request.
5211 	 */
5212 	if (qpriv->resync) {
5213 		if (!req->ack_seg || req->ack_seg == req->total_segs)
5214 			qpriv->r_tid_ack = !qpriv->r_tid_ack ?
5215 				rvt_size_atomic(&dev->rdi) :
5216 				qpriv->r_tid_ack - 1;
5217 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
5218 		req = ack_to_tid_req(e);
5219 	}
5220 
5221 	trace_hfi1_rsp_make_tid_ack(qp, e->psn);
5222 	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5223 					req);
5224 	/*
5225 	 * If we've sent all the ACKs that we can, we are done
5226 	 * until we get more segments...
5227 	 */
5228 	if (!qpriv->s_nak_state && !qpriv->resync &&
5229 	    req->ack_seg == req->comp_seg)
5230 		goto bail;
5231 
5232 	do {
5233 		/*
5234 		 * To deal with coalesced ACKs, the acked_tail pointer
5235 		 * into the flow array is used. The distance between it
5236 		 * and the clear_tail is the number of flows that are
5237 		 * being ACK'ed.
5238 		 */
5239 		req->ack_seg +=
5240 			/* Get up-to-date value */
5241 			CIRC_CNT(req->clear_tail, req->acked_tail,
5242 				 MAX_FLOWS);
5243 		/* Advance acked index */
5244 		req->acked_tail = req->clear_tail;
5245 
5246 		/*
5247 		 * req->clear_tail points to the segment currently being
5248 		 * received. So, when sending an ACK, the previous
5249 		 * segment is being ACK'ed.
5250 		 */
5251 		flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5252 		if (req->ack_seg != req->total_segs)
5253 			break;
5254 		req->state = TID_REQUEST_COMPLETE;
5255 
5256 		next = qpriv->r_tid_ack + 1;
5257 		if (next > rvt_size_atomic(&dev->rdi))
5258 			next = 0;
5259 		qpriv->r_tid_ack = next;
5260 		if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
5261 			break;
5262 		nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
5263 		if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
5264 			break;
5265 
5266 		/* Move to the next ack entry now */
5267 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
5268 		req = ack_to_tid_req(e);
5269 	} while (1);
5270 
5271 	/*
5272 	 * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and
5273 	 * req could be pointing at the previous ack queue entry
5274 	 */
5275 	if (qpriv->s_nak_state ||
5276 	    (qpriv->resync &&
5277 	     !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
5278 	     (cmp_psn(qpriv->r_next_psn_kdeth - 1,
5279 		      full_flow_psn(&req->flows[flow],
5280 				    req->flows[flow].flow_state.lpsn)) > 0))) {
5281 		/*
5282 		 * A NAK will implicitly acknowledge all previous TID RDMA
5283 		 * requests. Therefore, we NAK with the req->acked_tail
5284 		 * segment for the request at qpriv->r_tid_ack (same at
5285 		 * this point as the req->clear_tail segment for the
5286 		 * qpriv->r_tid_tail request)
5287 		 */
5288 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
5289 		req = ack_to_tid_req(e);
5290 		flow = req->acked_tail;
5291 	} else if (req->ack_seg == req->total_segs &&
5292 		   qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
5293 		qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
5294 
5295 	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5296 	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5297 					req);
5298 	hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
5299 						&bth2);
5300 	len = 0;
5301 	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
5302 	ps->s_txreq->hdr_dwords = hwords;
5303 	ps->s_txreq->sde = qpriv->s_sde;
5304 	ps->s_txreq->s_cur_size = len;
5305 	ps->s_txreq->ss = NULL;
5306 	hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
5307 			     ps);
5308 	ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
5309 	return 1;
5310 bail:
5311 	/*
5312 	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
5313 	 * RVT_S_RESP_PENDING
5314 	 */
5315 	smp_wmb();
5316 	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
5317 	return 0;
5318 }
5319 
5320 static int hfi1_send_tid_ok(struct rvt_qp *qp)
5321 {
5322 	struct hfi1_qp_priv *priv = qp->priv;
5323 
5324 	return !(priv->s_flags & RVT_S_BUSY ||
5325 		 qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
5326 		(verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
5327 		 (priv->s_flags & RVT_S_RESP_PENDING) ||
5328 		 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
5329 }
5330 
5331 void _hfi1_do_tid_send(struct work_struct *work)
5332 {
5333 	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
5334 	struct rvt_qp *qp = iowait_to_qp(w->iow);
5335 
5336 	hfi1_do_tid_send(qp);
5337 }
5338 
5339 static void hfi1_do_tid_send(struct rvt_qp *qp)
5340 {
5341 	struct hfi1_pkt_state ps;
5342 	struct hfi1_qp_priv *priv = qp->priv;
5343 
5344 	ps.dev = to_idev(qp->ibqp.device);
5345 	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
5346 	ps.ppd = ppd_from_ibp(ps.ibp);
5347 	ps.wait = iowait_get_tid_work(&priv->s_iowait);
5348 	ps.in_thread = false;
5349 	ps.timeout_int = qp->timeout_jiffies / 8;
5350 
5351 	trace_hfi1_rc_do_tid_send(qp, false);
5352 	spin_lock_irqsave(&qp->s_lock, ps.flags);
5353 
5354 	/* Return if we are already busy processing a work request. */
5355 	if (!hfi1_send_tid_ok(qp)) {
5356 		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5357 			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
5358 		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5359 		return;
5360 	}
5361 
5362 	priv->s_flags |= RVT_S_BUSY;
5363 
5364 	ps.timeout = jiffies + ps.timeout_int;
5365 	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
5366 		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
5367 	ps.pkts_sent = false;
5368 
5369 	/* insure a pre-built packet is handled  */
5370 	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
5371 	do {
5372 		/* Check for a constructed packet to be sent. */
5373 		if (ps.s_txreq) {
5374 			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
5375 				qp->s_flags |= RVT_S_BUSY;
5376 				ps.wait = iowait_get_ib_work(&priv->s_iowait);
5377 			}
5378 			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5379 
5380 			/*
5381 			 * If the packet cannot be sent now, return and
5382 			 * the send tasklet will be woken up later.
5383 			 */
5384 			if (hfi1_verbs_send(qp, &ps))
5385 				return;
5386 
5387 			/* allow other tasks to run */
5388 			if (hfi1_schedule_send_yield(qp, &ps, true))
5389 				return;
5390 
5391 			spin_lock_irqsave(&qp->s_lock, ps.flags);
5392 			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
5393 				qp->s_flags &= ~RVT_S_BUSY;
5394 				priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
5395 				ps.wait = iowait_get_tid_work(&priv->s_iowait);
5396 				if (iowait_flag_set(&priv->s_iowait,
5397 						    IOWAIT_PENDING_IB))
5398 					hfi1_schedule_send(qp);
5399 			}
5400 		}
5401 	} while (hfi1_make_tid_rdma_pkt(qp, &ps));
5402 	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
5403 	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5404 }
5405 
5406 static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
5407 {
5408 	struct hfi1_qp_priv *priv = qp->priv;
5409 	struct hfi1_ibport *ibp =
5410 		to_iport(qp->ibqp.device, qp->port_num);
5411 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
5412 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
5413 
5414 	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
5415 				   priv->s_sde ?
5416 				   priv->s_sde->cpu :
5417 				   cpumask_first(cpumask_of_node(dd->node)));
5418 }
5419 
5420 /**
5421  * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
5422  * @qp: the QP
5423  *
5424  * This schedules qp progress on the TID RDMA state machine. Caller
5425  * should hold the s_lock.
5426  * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
5427  * the two state machines can step on each other with respect to the
5428  * RVT_S_BUSY flag.
5429  * Therefore, a modified test is used.
5430  * @return true if the second leg is scheduled;
5431  *  false if the second leg is not scheduled.
5432  */
5433 bool hfi1_schedule_tid_send(struct rvt_qp *qp)
5434 {
5435 	lockdep_assert_held(&qp->s_lock);
5436 	if (hfi1_send_tid_ok(qp)) {
5437 		/*
5438 		 * The following call returns true if the qp is not on the
5439 		 * queue and false if the qp is already on the queue before
5440 		 * this call. Either way, the qp will be on the queue when the
5441 		 * call returns.
5442 		 */
5443 		_hfi1_schedule_tid_send(qp);
5444 		return true;
5445 	}
5446 	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5447 		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
5448 				IOWAIT_PENDING_TID);
5449 	return false;
5450 }
5451 
5452 bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
5453 {
5454 	struct rvt_ack_entry *prev;
5455 	struct tid_rdma_request *req;
5456 	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5457 	struct hfi1_qp_priv *priv = qp->priv;
5458 	u32 s_prev;
5459 
5460 	s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
5461 		(qp->s_tail_ack_queue - 1);
5462 	prev = &qp->s_ack_queue[s_prev];
5463 
5464 	if ((e->opcode == TID_OP(READ_REQ) ||
5465 	     e->opcode == OP(RDMA_READ_REQUEST)) &&
5466 	    prev->opcode == TID_OP(WRITE_REQ)) {
5467 		req = ack_to_tid_req(prev);
5468 		if (req->ack_seg != req->total_segs) {
5469 			priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
5470 			return true;
5471 		}
5472 	}
5473 	return false;
5474 }
5475 
5476 static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
5477 {
5478 	u64 reg;
5479 
5480 	/*
5481 	 * The only sane way to get the amount of
5482 	 * progress is to read the HW flow state.
5483 	 */
5484 	reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
5485 	return mask_psn(reg);
5486 }
5487 
5488 static void tid_rdma_rcv_err(struct hfi1_packet *packet,
5489 			     struct ib_other_headers *ohdr,
5490 			     struct rvt_qp *qp, u32 psn, int diff, bool fecn)
5491 {
5492 	unsigned long flags;
5493 
5494 	tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
5495 	if (fecn) {
5496 		spin_lock_irqsave(&qp->s_lock, flags);
5497 		qp->s_flags |= RVT_S_ECN;
5498 		spin_unlock_irqrestore(&qp->s_lock, flags);
5499 	}
5500 }
5501 
5502 static void update_r_next_psn_fecn(struct hfi1_packet *packet,
5503 				   struct hfi1_qp_priv *priv,
5504 				   struct hfi1_ctxtdata *rcd,
5505 				   struct tid_rdma_flow *flow,
5506 				   bool fecn)
5507 {
5508 	/*
5509 	 * If a start/middle packet is delivered here due to
5510 	 * RSM rule and FECN, we need to update the r_next_psn.
5511 	 */
5512 	if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
5513 	    !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
5514 		struct hfi1_devdata *dd = rcd->dd;
5515 
5516 		flow->flow_state.r_next_psn =
5517 			read_r_next_psn(dd, rcd->ctxt, flow->idx);
5518 	}
5519 }
5520