// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

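/* states of the completer state machine, driven by rxe_completer() */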
enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK]		= "GET ACK",
	[COMPST_GET_WQE]		= "GET WQE",
	[COMPST_COMP_WQE]		= "COMP WQE",
	[COMPST_COMP_ACK]		= "COMP ACK",
	[COMPST_CHECK_PSN]		= "CHECK PSN",
	[COMPST_CHECK_ACK]		= "CHECK ACK",
	[COMPST_READ]			= "READ",
	[COMPST_ATOMIC]			= "ATOMIC",
	[COMPST_WRITE_SEND]		= "WRITE/SEND",
	[COMPST_UPDATE_COMP]		= "UPDATE COMP",
	[COMPST_ERROR_RETRY]		= "ERROR RETRY",
	[COMPST_RNR_RETRY]		= "RNR RETRY",
	[COMPST_ERROR]			= "ERROR",
	[COMPST_EXIT]			= "EXIT",
	[COMPST_DONE]			= "DONE",
};

static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};

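/* convert a 5-bit IB RNR NAK timer code into jiffies, rounded up to at
 * least one jiffy so that a timer can always be armed
 */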
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

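/* map a send WR opcode onto the work completion opcode reported in the
 * CQE; unsupported opcodes map to 0xff
 */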
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}

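/* retransmit timer callback: flag the timeout and kick the completer
 * task, which decides whether a retry is actually needed
 */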
void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}

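/* queue a response packet for the completer; if packets are already
 * backed up, defer the completer to its task context instead of
 * running it inline, and count the deferral
 */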
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}

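/* classify the response PSN against the oldest WQE: past the end of
 * the WQE means complete it (or retry a read/atomic), older than the
 * expected PSN means a duplicate, anything else is checked as a live
 * ack
 */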
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if the response is past the oldest WQE. if it is,
	 * complete send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet; if it matches an
		 * uncompleted WQE, complete it, otherwise ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}

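/* validate the response against the current WQE: first verify that the
 * opcode continues the expected sequence, then dispatch on the ack
 * type (read data, atomic ack, or ACK/RNR NAK/NAK)
 */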
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		fallthrough;
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before it
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}

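/* copy the payload of a read response packet into the sge list of the
 * original read WQE; the WQE completes once the last packet has been
 * consumed
 */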
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mr_obj, NULL);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;

	return COMPST_UPDATE_COMP;
}

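/* copy the 8-byte original value carried in an atomic ack into the
 * local buffer described by the WQE
 */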
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;
	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mr_obj, NULL);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	return COMPST_COMP_ACK;
}

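/* fill in a completion entry for the WQE, in ib_wc format for kernel
 * consumers or ib_uverbs_wc format for user space
 */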
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc		*wc	= &cqe->ibwc;

		wc->wr_id		= wqe->wr.wr_id;
		wc->status		= wqe->status;
		wc->opcode		= wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len		= wqe->dma.length;
		wc->qp			= &qp->ibqp;
	} else {
		struct ib_uverbs_wc	*uwc	= &cqe->uibwc;

		uwc->wr_id		= wqe->wr.wr_id;
		uwc->status		= wqe->status;
		uwc->opcode		= wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len		= wqe->dma.length;
		uwc->qp_num		= qp->ibqp.qp_num;
	}
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    wqe->status != IB_WC_SUCCESS) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
	}
}

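/* a WQE has been fully acked: return its rd_atomic credit, finish a
 * pending SQ drain transition, and generate the completion
 */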
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}

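/* complete a WQE that does not require an ack (or whose response was
 * implicitly acked) and, when a response is in hand, advance the
 * completer PSN past the WQE
 */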
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}

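/* flush the response queue and the send queue when the QP is reset or
 * enters the error state; if notify is set, flush-complete the
 * remaining WQEs instead of silently retiring them
 */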
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	while ((wqe = queue_head(qp->sq.queue))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			advance_consumer(qp->sq.queue);
		}
	}
}

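/* release a response packet together with the qp and device references
 * taken for it on the receive path
 */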
static void free_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_drop_ref(qp);
	ib_device_put(dev);
}

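/* the completer state machine, run from the comp task; returns 0 to be
 * called again by the task loop or -EAGAIN to stop until rescheduled
 */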
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;
	int ret = 0;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		ret = -EAGAIN;
		goto done;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry) {
		ret = -EAGAIN;
		goto done;
	}

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* rearm the retransmit timer if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			ret = -EAGAIN;
			goto done;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the
			 * send queue if that makes sense and the limits have
			 * not been exceeded. remember that some timeouts are
			 * spurious, since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				ret = -EAGAIN;
				goto done;
			}

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry)
				goto done;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
				}
				goto done;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				ret = -EAGAIN;
				goto done;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);
			ret = -EAGAIN;
			goto done;
		}
	}

done:
	if (pkt)
		free_pkt(pkt);
	rxe_drop_ref(qp);

	return ret;
}