// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

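/* Re-advance the DMA state of a partially acknowledged send or write WQE
 * by npsn packets, so that a retry resumes at the first unacknowledged PSN
 * instead of resending data the responder has already received.
 */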
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

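/* Rewind the requester to the last PSN acknowledged by the completer and
 * mark every outstanding WQE as posted again so it will be resent. Only the
 * unacknowledged tail of the first (partially acked) WQE is redone.
 */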
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

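/* Fires when the RNR NAK retry timer expires; kick the requester task so
 * the affected WQE is resent.
 */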
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

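/* Return the next WQE the requester should work on, or NULL if the send
 * queue is empty, the QP is draining/drained, or a fenced WQE must wait
 * for earlier work to complete.
 */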
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = queue_addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
						     (index != cons))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

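/* Select the next RC wire opcode for a work request: FIRST/MIDDLE/LAST/ONLY
 * variants are chosen from the previous packet's opcode and whether the
 * remaining payload fits in a single MTU ("fits").
 */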
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

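/* Same opcode selection as next_opcode_rc(), but for UC QPs, which only
 * support sends and RDMA writes.
 */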
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

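/* Dispatch opcode selection on QP type. Datagram QPs (SMI/UD/GSI) always
 * send single-packet messages, so only the ONLY variants apply.
 */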
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

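/* Reserve one of the QP's outstanding read/atomic slots for this WQE.
 * Returns -EAGAIN (with need_rd_atomic set) if the initiator depth is
 * exhausted; the WQE is retried once a slot is released by the completer.
 */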
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

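/* Connected QPs (RC/UC) use the path MTU negotiated for the QP; datagram
 * QPs are limited only by the port MTU.
 */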
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

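/* Allocate an skb for one request packet and build its headers (BTH plus
 * any RETH/IMMDT/IETH/ATMETH/DETH the opcode requires) from the WQE and
 * the current requester state.
 */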
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, port_num and mask are initialized in ifc layer */
	pkt->rxe	= rxe;
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	if (!av)
		return NULL;

	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

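/* Finish building the request packet: prepare the lower-layer headers via
 * rxe_prepare() and, for sends and writes, copy the payload (from inline
 * data or the SGE list) into the skb, zeroing any pad bytes.
 */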
static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	int err;

	err = rxe_prepare(pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
		}
	}

	return 0;
}

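/* A WQE whose last packet has been built becomes 'pending' (waiting for
 * acks) on RC QPs; otherwise it stays 'processing' until its final packet
 * is generated.
 */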
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

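/* Record the PSN range covered by the WQE on its first packet and advance
 * the requester PSN: for RDMA reads by the number of expected response
 * packets, otherwise by one.
 */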
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

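/* save_state()/rollback_state() snapshot and restore the WQE state and the
 * requester PSN around rxe_xmit_packet(), so a failed transmit leaves no
 * trace for the completer to act on.
 */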
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

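/* Commit the requester state after a successful transmit and (re)arm the
 * retransmit timer if a timeout is configured for the QP.
 */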
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

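/* Execute WQEs that never generate wire traffic (local invalidate, fast MR
 * registration, memory window bind) and complete them immediately.
 */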
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
		rxe_run_task(&qp->comp.task, 1);

	return 0;
}

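/* Main requester task: consume send WQEs one packet at a time, build and
 * transmit request packets, and stop (returning -EAGAIN) whenever it must
 * wait for credits, acks, skbs, or new work.
 */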
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		ret = rxe_do_local_ops(qp, wqe);
		if (unlikely(ret))
			goto err;
		else
			goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	ret = finish_packet(qp, wqe, &pkt, skb, payload);
	if (unlikely(ret)) {
		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
		if (ret == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}