// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

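/* Advance a retried send or write WQE past the npsn packets the
 * responder has already acknowledged, so the retry resumes at the
 * first unacknowledged packet instead of resending the whole message.
 */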
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

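/* Rewind the requester to the oldest incomplete WQE and replay from
 * the last PSN the completer has acknowledged. Completed WQEs are
 * skipped; the oldest outstanding WQE is resumed at its first
 * unacknowledged packet, while all later WQEs are restarted from
 * the beginning.
 */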
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

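/* A previous send hit a receiver-not-ready NAK; when the RNR timer
 * expires, kick the requester task so it retries the WQE.
 */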
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

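/* Return the next WQE the requester should work on, or NULL if there
 * is nothing to do. Handles the SQ drain transition (QP_STATE_DRAIN
 * to QP_STATE_DRAINED plus the IB_EVENT_SQ_DRAINED upcall) and
 * fencing: a fenced WQE is not started until all earlier WQEs have
 * completed.
 */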
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_bh(&qp->state_lock);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = queue_addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
						     (index != cons))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

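/* Select the RC wire opcode for the next packet of this work request.
 * Whether the remaining payload fits in one MTU and whether the
 * previous packet was a FIRST/MIDDLE of the same message determine
 * the FIRST/MIDDLE/LAST/ONLY variant.
 */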
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

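/* As next_opcode_rc(), but for the smaller UC opcode set: no reads,
 * atomics or local operations on unreliable connected QPs.
 */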
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

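/* Map the WQE opcode to the IBA wire opcode of the next packet based
 * on the QP type. UD/SMI/GSI sends always fit in a single packet, so
 * only the ONLY variants apply there.
 */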
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

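/* Reserve one slot of the initiator depth (maximum outstanding RDMA
 * read/atomic operations) for this WQE. Returns -EAGAIN if the QP is
 * already at its limit; the decrement is undone so a later attempt
 * can retry.
 */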
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

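/* RC and UC use the path MTU negotiated for the connection; datagram
 * QPs are bounded only by the port MTU.
 */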
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

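/* Allocate an skb for one request packet and build its headers: BTH
 * always, plus RETH/IMMDT/IETH/ATMETH/DETH as required by the opcode
 * mask. The payload itself is copied in later by finish_packet().
 */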
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

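/* Complete the packet built by init_req_packet(): prepare the
 * network headers, copy the payload (from inline data or via the
 * SGE list) and zero any pad bytes.
 */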
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 paylen)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
		}
	}

	return 0;
}

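/* Track per-WQE progress: once the last packet of an RC request has
 * been built the WQE waits for an ack (wqe_state_pending); a packet
 * in the middle of a message leaves it in wqe_state_processing.
 */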
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

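/* Record the PSN range a WQE occupies and advance the QP's next PSN.
 * A read request consumes one PSN per expected response packet, so
 * the whole range is skipped at once; everything else advances by
 * one per packet, modulo the 24-bit PSN space (BTH_PSN_MASK).
 */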
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

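/* save_state()/rollback_state() snapshot and restore the WQE and QP
 * PSN fields that update_wqe_state()/update_wqe_psn() modify, so a
 * failed transmit can be undone cleanly.
 */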
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

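/* Commit the side effects of a successfully transmitted packet:
 * remember the opcode for FIRST/MIDDLE/LAST tracking, move past the
 * WQE if this was its last packet, and arm the retransmit timer if
 * it is not already running.
 */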
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

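/* Execute work requests that are consumed locally instead of
 * producing packets: fast memory registration, local invalidate and
 * memory window bind. On success the WQE completes immediately and
 * the completer task is scheduled if a completion is wanted.
 */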
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
		rxe_run_task(&qp->comp.task, 1);

	return 0;
}

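/* Main requester work loop, run from the tasklet. Pulls WQEs off the
 * send queue and emits one packet per pass: pick the opcode, honor
 * flow-control limits (outstanding PSNs, inflight skbs, read/atomic
 * depth), build and transmit the packet, then either loop for the
 * next packet or exit until rescheduled.
 */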
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;

	rxe_get(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		ret = rxe_do_local_ops(qp, wqe);
		if (unlikely(ret))
			goto err;
		else
			goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_put(qp);
			return 0;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		pr_err("qp#%d Failed no address vector\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err_drop_ah;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err_drop_ah;
	}

	ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(ret)) {
		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
		if (ret == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		goto err_drop_ah;
	}

	if (ah)
		rxe_put(ah);

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, wqe, &pkt);

	goto next_wqe;

err_drop_ah:
	if (ah)
		rxe_put(ah);
err:
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_put(qp);
	return -EAGAIN;
}