// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

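/* Re-advance the DMA state of a partially acknowledged send or write
 * WQE past the npsn packets the responder has already acked, so that
 * retransmission resumes at the first unacknowledged packet.
 */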
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

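/* Rewind the requester to the completer's PSN and mark every
 * incomplete WQE on the send queue as posted again so it will be
 * resent. The first (partially acked) WQE keeps the progress it has
 * already made; later WQEs are reset to their beginning.
 */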
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	if (qp->is_user) {
		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
	} else {
		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
		prod = producer_index(q, QUEUE_TYPE_KERNEL);
	}

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = next_index(q, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

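/* Runs when the RNR NAK backoff timer expires; kicks the requester
 * task so the deferred work request is retried.
 */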
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

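/* Return the next WQE for the requester to work on, or NULL if the
 * send queue is empty, the QP is draining, or a fenced WQE must wait
 * for earlier work to complete. Also detects the fully drained state
 * and raises IB_EVENT_SQ_DRAINED.
 */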
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	if (qp->is_user) {
		wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
	} else {
		wqe = queue_head(q, QUEUE_TYPE_KERNEL);
		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
		prod = producer_index(q, QUEUE_TYPE_KERNEL);
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
						     (index != cons))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

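/* Select the RC wire opcode for the next packet of a work request.
 * Multi-packet messages step through the FIRST/MIDDLE/LAST variants
 * based on the previous opcode; a message that fits in one MTU uses
 * the ONLY variant.
 */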
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

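/* Same opcode selection as next_opcode_rc() for the subset of work
 * request types that UC QPs support.
 */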
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

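/* Map a work request opcode to the wire opcode of the next packet
 * for this QP type. fits is true when the remaining payload fits in
 * a single MTU.
 */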
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

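/* Reserve one of the QP's max_rd_atomic slots for a new read or
 * atomic request. Returns -EAGAIN when all slots are in use.
 */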
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

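/* Connected QPs use the negotiated path MTU; datagram QPs are
 * limited only by the port MTU.
 */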
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

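/* Allocate the skb for the next request packet and build the BTH and
 * whatever optional headers (RETH, IMMDT, IETH, ATMETH, DETH) the
 * opcode requires. The payload and ICRC are added by finish_packet().
 */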
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

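/* Copy the payload into the packet, either from inline data or
 * through the SGE list, zero any pad bytes and append the inverted
 * CRC32 as the ICRC.
 */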
static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					RXE_FROM_MR_OBJ,
					&crc);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
			crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}

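/* An RC WQE that has sent its last packet waits for an ack
 * (wqe_state_pending); a WQE with packets still to send stays in
 * wqe_state_processing.
 */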
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

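/* Record the PSN range covered by the WQE and advance the requester
 * PSN: a read request consumes one PSN per expected response packet,
 * every other request packet consumes exactly one.
 */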
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

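/* Snapshot the WQE and PSN fields that update_wqe_state() and
 * update_wqe_psn() modify, so a failed transmit can be undone.
 */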
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

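/* Undo the effects of update_wqe_state() and update_wqe_psn() after
 * rxe_xmit_packet() fails, so the packet can be rebuilt and resent.
 */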
static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

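/* Commit a successful transmit: remember the opcode for
 * FIRST/MIDDLE/LAST tracking, step past the WQE if this was its last
 * packet and (re)arm the retransmit timer.
 */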
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

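/* Handle work requests that complete locally without putting a
 * packet on the wire: memory registration, local invalidate and
 * memory window bind.
 */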
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	struct rxe_mr *mr;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		mr = to_rmr(wqe->wr.wr.reg.mr);
		rxe_add_ref(mr);
		mr->state = RXE_MR_STATE_VALID;
		mr->access = wqe->wr.wr.reg.access;
		mr->ibmr.lkey = wqe->wr.wr.reg.key;
		mr->ibmr.rkey = wqe->wr.wr.reg.key;
		mr->iova = wqe->wr.wr.reg.mr->iova;
		rxe_drop_ref(mr);
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
		rxe_run_task(&qp->comp.task, 1);

	return 0;
}

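/* The requester task. Each pass through the loop takes the next WQE
 * from the send queue and emits one request packet, until the queue
 * is empty or the QP has to wait (fence, rd_atomic slots, inflight
 * skb limit or the unacked PSN window).
 */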
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(q, q->type);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		ret = rxe_do_local_ops(qp, wqe);
		if (unlikely(ret))
			goto err;
		else
			goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	ret = finish_packet(qp, wqe, &pkt, skb, payload);
	if (unlikely(ret)) {
		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
		if (ret == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}