// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

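/* Walk the first @npsn packets of a partially acknowledged send or
 * RDMA write wqe and advance its dma state past the data that has
 * already been acknowledged, so a retry resumes at the right offset.
 */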
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

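/* Rewind the send queue to the consumer index and reset the request
 * PSN to the last completed PSN so that unacknowledged wqes are
 * resent. The first incomplete wqe may be partially acknowledged and
 * is adjusted rather than restarted from the beginning.
 */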
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

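/* RNR NAK timer callback: once the remote-requested backoff has
 * expired, flag the send queue for retry and reschedule the
 * requester task.
 */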
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "nak timer fired\n");

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* request a send queue retry */
		qp->req.need_retry = 1;
		qp->req.wait_for_rnr_timer = 0;
		rxe_sched_task(&qp->req.task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

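/* If the qp is in the SQD state, check whether the send queue has
 * fully drained and, if so, clear sq_draining and deliver an
 * IB_EVENT_SQ_DRAINED event to the consumer.
 */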
static void req_check_sq_drain_done(struct rxe_qp *qp)
{
	struct rxe_queue *q;
	unsigned int index;
	unsigned int cons;
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_SQD) {
		q = qp->sq.queue;
		index = qp->req.wqe_index;
		cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		wqe = queue_addr_from_index(q, cons);

		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		do {
			if (!qp->attr.sq_draining)
				/* comp just finished */
				break;

			if (wqe && ((index != cons) ||
					(wqe->state != wqe_state_posted)))
				/* comp not done yet */
				break;

			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
			return;
		} while (0);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

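/* Return the wqe at the current requester index, or NULL if the send
 * queue is empty. req_next_wqe() below adds the drain and SQD checks
 * and fills in wqe->mask before handing the wqe to the requester.
 */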
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod;

	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
	if (index == prod)
		return NULL;
	else
		return queue_addr_from_index(q, index);
}

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	req_check_sq_drain_done(qp);

	wqe = __req_next_wqe(qp);
	if (wqe == NULL)
		return NULL;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF) see IBA 10.6.5.1
	 * Requires ALL previous operations on the send queue
	 * are complete. Make mandatory for the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence see IBA 10.8.3.3
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

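/* Select the next RC packet opcode for a work request. @fits is true
 * when the remaining payload fits in a single MTU; together with the
 * previously sent opcode it decides between the FIRST, MIDDLE, LAST
 * and ONLY variants of multi-packet operations.
 */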
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

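/* Reserve one of the initiator's outstanding read/atomic slots for
 * this wqe. Returns -EAGAIN and sets need_rd_atomic when no slot is
 * available, so the requester retries after a completion frees one.
 */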
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

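/* Allocate an skb for one request packet and build its IBA headers
 * (BTH plus any RETH/FETH/IMMDT/IETH/ATMETH/DETH the opcode needs)
 * from the wqe and the address vector. The payload is copied in later
 * by finish_packet().
 */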
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u32 qp_num;
	int ack_req = 0;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
		ack_req = ((pkt->mask & RXE_END_MASK) ||
				(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

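/* Finish building the request packet: prepare the network headers and
 * copy the payload from inline data, the wqe's sg list or the atomic
 * write buffer, zeroing any pad bytes that follow the payload.
 */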
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

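/* Advance the request PSN after building a packet. For the first
 * packet of a wqe also record its first and last PSN; for RDMA reads
 * skip ahead past the PSNs the responder will consume for the read
 * response packets.
 */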
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

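/* save_state()/rollback_state() snapshot and restore the wqe state,
 * psn bounds and dma state around packet transmission so a dropped
 * packet can be resent later without corrupting the wqe.
 */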
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	rollback_wqe->dma = wqe->dma;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	wqe->dma = rollback_wqe->dma;
	qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

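/* Execute a local work request (invalidate, fast-reg MR or bind MW)
 * directly, without emitting a packet, and schedule the completer to
 * generate its completion since no ack will arrive for it.
 */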
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
	 */
	rxe_sched_task(&qp->comp.task);

	return 0;
}

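/* Requester task: pull the next wqe from the send queue and build and
 * transmit one request packet per invocation, handling retries, fences,
 * local operations and flow-control limits along the way. Returns 0 to
 * keep iterating or -EAGAIN to stop until rescheduled.
 */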
int rxe_requester(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = __req_next_wqe(qp);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		if (wqe)
			goto err;
		else
			goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	/* we come here if the retransmit timer has fired
	 * or if the rnr timer has fired. If the retransmit
	 * timer fires while we are processing an RNR NAK wait
	 * until the rnr timer has fired before starting the
	 * retry flow
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			rxe_sched_task(&qp->comp.task);
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	/* save wqe state before we build and send packet */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		rxe_dbg_qp(qp, "Failed no address vector\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		rxe_dbg_qp(qp, "Failed allocating skb\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		rxe_dbg_qp(qp, "Error during finish packet\n");
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	/* update wqe state as though we had sent it */
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		if (err != -EAGAIN) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			goto err;
		}

		/* the packet was dropped so reset wqe to the state
		 * before we sent it so we can try to resend
		 */
		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		/* force a delay until the dropped packet is freed and
		 * the send queue is drained below the low water mark
		 */
		qp->need_req_skb = 1;

		rxe_sched_task(&qp->req.task);
		goto exit;
	}

	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the work item. A zero return
	 * will continue looping and return to rxe_requester
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	rxe_qp_error(qp);
exit:
	ret = -EAGAIN;
out:
	return ret;
}