// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

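/* Replay the already-acked prefix of the first WQE being retried:
 * advance the DMA state past the npsn packets the responder has
 * acknowledged so the resend starts at the first unacked packet.
 */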
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

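/* Rewind the requester to the completer's PSN and reset every
 * not-yet-completed WQE between the consumer and producer indices so
 * it will be resent. The first WQE may already be partially acked, so
 * its DMA state is advanced past the acked packets instead of being
 * fully reset.
 */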
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

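/* RNR NAK timer handler: the receiver-not-ready delay has expired, so
 * request a send queue retry and reschedule the requester task.
 */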
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "nak timer fired\n");

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* request a send queue retry */
		qp->req.need_retry = 1;
		qp->req.wait_for_rnr_timer = 0;
		rxe_sched_task(&qp->req.task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

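/* If the QP is in the SQD state, check whether the send queue has
 * drained (no WQE still being processed). If so, clear sq_draining and
 * raise an IB_EVENT_SQ_DRAINED event to the ulp.
 */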
static void req_check_sq_drain_done(struct rxe_qp *qp)
{
	struct rxe_queue *q;
	unsigned int index;
	unsigned int cons;
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_SQD) {
		q = qp->sq.queue;
		index = qp->req.wqe_index;
		cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		wqe = queue_addr_from_index(q, cons);

		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		do {
			if (!qp->attr.sq_draining)
				/* comp just finished */
				break;

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted)))
				/* comp not done yet */
				break;

			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
			return;
		} while (0);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

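/* Return the WQE at the current requester index, or NULL if the send
 * queue is empty.
 */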
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod;

	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
	if (index == prod)
		return NULL;
	else
		return queue_addr_from_index(q, index);
}

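/* Fetch the next WQE and compute its opcode mask. Returns NULL if the
 * send queue is empty or if the QP is draining and the WQE has not
 * already started processing.
 */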
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	req_check_sq_drain_done(qp);

	wqe = __req_next_wqe(qp);
	if (wqe == NULL)
		return NULL;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF), see IBA 10.6.5.1.
	 * Requires that ALL previous operations on the send queue
	 * are complete. Made mandatory for the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence, see IBA 10.8.3.3.
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

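/* Select the next RC wire opcode for a work request. Multi-packet
 * messages step through the FIRST/MIDDLE/LAST opcode sequence based on
 * the previous opcode sent; a message whose remainder fits in a single
 * packet (fits != 0) uses the ONLY or LAST variant. For example, a
 * 3000-byte RC SEND at a 1024-byte MTU emits SEND_FIRST, SEND_MIDDLE,
 * SEND_LAST.
 */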
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

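/* Same opcode progression for UC QPs, which only support send and
 * RDMA write operations.
 */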
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

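/* Pick the wire opcode for the next packet of this WQE based on the
 * QP type. UD and GSI messages never span more than one packet, so
 * only the SEND_ONLY variants apply there.
 */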
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

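/* Reserve one outstanding read/atomic slot for this WQE. The available
 * depth is bounded by the QP's max_rd_atomic attribute; returns
 * -EAGAIN if all slots are currently in use.
 */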
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

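/* RC and UC use the path MTU associated with the connected QP; UD and
 * GSI packets are limited only by the port MTU.
 */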
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

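/* Allocate an skb for the next request packet and build its headers:
 * the BTH plus whichever extension headers (RETH, FETH, IMMDT, IETH,
 * ATMETH, DETH) the opcode requires. The payload is copied in later by
 * finish_packet().
 */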
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u32			qp_num;
	int			ack_req = 0;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
		ack_req = ((pkt->mask & RXE_END_MASK) ||
			   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

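/* Complete the packet: build the network headers and copy in the
 * payload from the inline data, the SGE list or the atomic write
 * buffer, zeroing any pad bytes. Flush requests carry no payload
 * (oA19-2).
 */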
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

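/* An RC WQE that has just emitted its last packet waits in the pending
 * state for an acknowledgement; a WQE with packets still to send stays
 * in the processing state.
 */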
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

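/* Advance the PSN. A read request consumes one PSN per expected
 * response packet, so the next PSN jumps past the whole response;
 * every other packet consumes exactly one PSN. For example, a
 * 4096-byte READ at a 1024-byte MTU advances the PSN by four.
 */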
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

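/* Snapshot the WQE and PSN state before building a packet so that a
 * dropped transmit can be rolled back and retried; rollback_state()
 * below is the inverse operation.
 */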
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	rollback_wqe->dma = wqe->dma;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	wqe->dma = rollback_wqe->dma;
	qp->req.psn = rollback_psn;
}

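/* Commit the packet just sent: remember its opcode, advance to the
 * next WQE if this was the last packet of the message, and arm the
 * retransmit timer if it is not already running.
 */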
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

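/* Execute a local work request (local invalidate, fast-reg MR or
 * memory window bind) directly, without putting a packet on the wire.
 */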
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests, which could
	 * lead to a deadlock. So go ahead and complete it now.
	 */
	rxe_sched_task(&qp->comp.task);

	return 0;
}

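/* The requester work item: pull the next WQE from the send queue,
 * build the next packet for it and transmit. Returns 0 to be called
 * again immediately for the next packet, or -EAGAIN to stop until the
 * task is rescheduled.
 */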
int rxe_requester(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = __req_next_wqe(qp);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		if (wqe) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			goto err;
		} else {
			goto exit;
		}
	}

	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	/* we come here if the retransmit timer has fired
	 * or if the rnr timer has fired. If the retransmit
	 * timer fires while we are processing an RNR NAK,
	 * wait until the rnr timer has fired before starting
	 * the retry flow.
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			rxe_sched_task(&qp->comp.task);
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	/* save wqe state before we build and send packet */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		rxe_dbg_qp(qp, "Failed no address vector\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		rxe_dbg_qp(qp, "Failed allocating skb\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		rxe_dbg_qp(qp, "Error during finish packet\n");
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	/* update wqe state as though we had sent it */
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		if (err != -EAGAIN) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			goto err;
		}

		/* the packet was dropped so reset wqe to the state
		 * before we sent it so we can try to resend
		 */
		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		/* force a delay until the dropped packet is freed and
		 * the send queue is drained below the low water mark
		 */
		qp->need_req_skb = 1;

		rxe_sched_task(&qp->req.task);
		goto exit;
	}

	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the work item. A zero return
	 * will continue looping and return to rxe_requester.
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	rxe_qp_error(qp);
exit:
	ret = -EAGAIN;
out:
	return ret;
}