/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static const char * const resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}
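
/*
 * Note on the scheduling decision above: a non-zero must_sched asks
 * rxe_run_task() to defer the responder task to tasklet context rather
 * than run it inline on the caller's stack; read requests and a
 * backlog of more than one queued packet are treated as too much work
 * for inline execution.
 */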

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}
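
/*
 * psn_compare(), defined in rxe_hdr.h, does serial-number arithmetic
 * on 24-bit PSNs so the ordering tests above stay correct across the
 * 0xffffff -> 0 wrap.  A minimal sketch of the idea (illustrative,
 * not this driver's definition):
 *
 *	static inline int psn_compare_sketch(u32 psn_a, u32 psn_b)
 *	{
 *		s32 diff = (psn_a - psn_b) << 8;
 *
 *		return diff;
 *	}
 *
 * Shifting the 24-bit difference into the top bits of an s32 makes the
 * sign meaningful: psn_a = 0 vs psn_b = 0xffffff yields a positive
 * result, i.e. a is one PSN ahead of b despite the wrap.
 */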

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}
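
/*
 * For context, the remote-access rights tested above are granted by
 * the consumer when it moves the QP to INIT.  An illustrative sketch
 * of the standard verbs call (not code from this driver; a real INIT
 * transition also sets pkey_index and port_num):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE |
 *				   IB_ACCESS_REMOTE_ATOMIC,
 *	};
 *
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
 */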

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}
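
/*
 * The SRQ limit event above is one-shot (srq->limit is cleared before
 * it fires); the consumer typically reposts receive WRs from its event
 * handler and then re-arms the limit.  An illustrative sketch using
 * the standard verbs API (not code from this driver):
 *
 *	static void srq_event(struct ib_event *ev, void *context)
 *	{
 *		if (ev->event == IB_EVENT_SRQ_LIMIT_REACHED) {
 *			struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *			post_more_recv_wrs(ev->element.srq);
 *			ib_modify_srq(ev->element.srq, &attr, IB_SRQ_LIMIT);
 *		}
 *	}
 *
 * where post_more_recv_wrs() stands in for whatever repost logic the
 * ULP uses.
 */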

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This may not be exactly a length
				 * error, but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}
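
/*
 * Worked example for the pad checks above: a last (or only) write
 * packet carrying resid = 5 bytes must be padded out to a 4-byte
 * boundary, so bth_pad() must equal 0x3 & (-5) = 3 (5 + 3 = 8 is a
 * multiple of 4); for resid = 8 the expected pad is 0x3 & (-8) = 0.
 */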

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}
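
/*
 * Example of the bookkeeping above: a 10000-byte RDMA write at mtu =
 * 4096 arrives as FIRST (4096) / MIDDLE (4096) / LAST (1808) packets;
 * each call advances resp.va and shrinks resp.resid by the payload, so
 * by the time the LAST packet arrives check_rkey() requires it to
 * carry exactly the remaining 1808 bytes.
 */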

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8-byte aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}
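
/*
 * Semantics of the two atomics above (worked example): with *vaddr =
 * 7, a COMPARE_SWAP carrying comp = 7, swap = 9 stores 9; a FETCH_ADD
 * carrying add = 3 stores 10.  Either way atomic_orig keeps the pre-op
 * value 7, which is what the ATOMIC_ACKNOWLEDGE returns to the
 * requester (see atmack_set_orig() in prepare_ack_packet() below).
 */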

static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process this request. Get a
		 * resource.
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
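		/*
		 * Worked example: reth_len = 8192 at mtu = 4096 needs
		 * ceil(8192 / 4096) = 2 response packets, so last_psn
		 * lands one past first_psn; a zero-length read still
		 * consumes a single PSN, hence the else branch above.
		 */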
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		kfree_skb(skb);
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}
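
/*
 * The 40-byte header built above is handed to the ULP ahead of the
 * payload for UD-class QPs (see execute() below), mirroring the GRH
 * that InfiniBand hardware deposits at the start of UD receive
 * buffers.
 */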

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are replayed
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	wc->wr_id		= wqe->wr_id;
	wc->status		= qp->resp.status;
	wc->qp			= &qp->ibqp;

	/* fields after status are not required for errors */
	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data =
					(__u32 __force)immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* for an srq we hold a copy of the wqe; otherwise we hold a
	 * reference into the rq queue and must advance past it
	 */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err_ratelimited("Failed sending ack\n");
		kfree_skb(skb);
	}

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy) {
		rxe_add_ref(qp); /* for the new SKB */
	} else {
		pr_warn("Could not clone atomic response\n");
		kfree_skb(skb); /* do not leak the unsent ack */
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn  = ack_pkt.psn;
	res->cur_psn   = ack_pkt.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND or WRITE. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp); /* for the new SKB */
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}

			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rxe_drop_ref(qp);
				kfree_skb(skb_copy);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found (Class D error), or the result was
		 * resent above; either way, clean up the request.
		 */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}

int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}
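
/*
 * Note: rxe_responder() is driven by the task machinery in rxe_task.c,
 * which keeps invoking it while it returns 0 and parks the task on a
 * non-zero return; so RESPST_DONE (ret = 0) means "call me again" and
 * RESPST_EXIT (ret = -EAGAIN) idles the responder until
 * rxe_resp_queue_pkt() schedules it for a new packet.
 */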