// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	rxe_dbg_qp(qp, "nak timer fired\n");

	spin_lock_bh(&qp->state_lock);
	if (qp->valid) {
		/* request a send queue retry */
		qp->req.need_retry = 1;
		qp->req.wait_for_rnr_timer = 0;
		rxe_sched_task(&qp->req.task);
	}
	spin_unlock_bh(&qp->state_lock);
}

static void req_check_sq_drain_done(struct rxe_qp *qp)
{
	struct rxe_queue *q;
	unsigned int index;
	unsigned int cons;
	struct rxe_send_wqe *wqe;

	spin_lock_bh(&qp->state_lock);
	if (qp_state(qp) == IB_QPS_SQD) {
		q = qp->sq.queue;
		index = qp->req.wqe_index;
		cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		wqe = queue_addr_from_index(q, cons);

		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		do {
			if (!qp->attr.sq_draining)
				/* comp just finished */
				break;

			if (wqe && ((index != cons) ||
				    (wqe->state != wqe_state_posted)))
				/* comp not done yet */
				break;

			qp->attr.sq_draining = 0;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

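				/* notify the consumer that the send queue
				 * has drained
				 */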
				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
			return;
		} while (0);
	}
	spin_unlock_bh(&qp->state_lock);
}

static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod;

	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
	if (index == prod)
		return NULL;
	else
		return queue_addr_from_index(q, index);
}

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;

	req_check_sq_drain_done(qp);

	wqe = __req_next_wqe(qp);
	if (wqe == NULL)
		return NULL;

	spin_lock_bh(&qp->state_lock);
	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing))) {
		spin_unlock_bh(&qp->state_lock);
		return NULL;
	}
	spin_unlock_bh(&qp->state_lock);

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF) see IBA 10.6.5.1
	 * Requires ALL previous operations on the send queue
	 * are complete. Make mandatory for the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence see IBA 10.8.3.3
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			 (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			 (RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ?
			ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
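	 * The completer task will then retire this wqe and generate
	 * a completion if one is required.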
	 */
	rxe_sched_task(&qp->comp.task);

	return 0;
}

int rxe_requester(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;

	spin_lock_bh(&qp->state_lock);
	if (unlikely(!qp->valid)) {
		spin_unlock_bh(&qp->state_lock);
		goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = __req_next_wqe(qp);
		spin_unlock_bh(&qp->state_lock);
		if (wqe)
			goto err;
		else
			goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		spin_unlock_bh(&qp->state_lock);
		goto exit;
	}
	spin_unlock_bh(&qp->state_lock);

	/* we come here if the retransmit timer has fired
	 * or if the rnr timer has fired. If the retransmit
	 * timer fires while we are processing an RNR NAK wait
	 * until the rnr timer has fired before starting the
	 * retry flow
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
772 */ 773 774 /* fake a successful UD send */ 775 wqe->first_psn = qp->req.psn; 776 wqe->last_psn = qp->req.psn; 777 qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK; 778 qp->req.opcode = IB_OPCODE_UD_SEND_ONLY; 779 qp->req.wqe_index = queue_next_index(qp->sq.queue, 780 qp->req.wqe_index); 781 wqe->state = wqe_state_done; 782 wqe->status = IB_WC_SUCCESS; 783 rxe_sched_task(&qp->comp.task); 784 goto done; 785 } 786 payload = mtu; 787 } 788 789 pkt.rxe = rxe; 790 pkt.opcode = opcode; 791 pkt.qp = qp; 792 pkt.psn = qp->req.psn; 793 pkt.mask = rxe_opcode[opcode].mask; 794 pkt.wqe = wqe; 795 796 av = rxe_get_av(&pkt, &ah); 797 if (unlikely(!av)) { 798 rxe_dbg_qp(qp, "Failed no address vector\n"); 799 wqe->status = IB_WC_LOC_QP_OP_ERR; 800 goto err; 801 } 802 803 skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt); 804 if (unlikely(!skb)) { 805 rxe_dbg_qp(qp, "Failed allocating skb\n"); 806 wqe->status = IB_WC_LOC_QP_OP_ERR; 807 if (ah) 808 rxe_put(ah); 809 goto err; 810 } 811 812 err = finish_packet(qp, av, wqe, &pkt, skb, payload); 813 if (unlikely(err)) { 814 rxe_dbg_qp(qp, "Error during finish packet\n"); 815 if (err == -EFAULT) 816 wqe->status = IB_WC_LOC_PROT_ERR; 817 else 818 wqe->status = IB_WC_LOC_QP_OP_ERR; 819 kfree_skb(skb); 820 if (ah) 821 rxe_put(ah); 822 goto err; 823 } 824 825 if (ah) 826 rxe_put(ah); 827 828 /* 829 * To prevent a race on wqe access between requester and completer, 830 * wqe members state and psn need to be set before calling 831 * rxe_xmit_packet(). 832 * Otherwise, completer might initiate an unjustified retry flow. 833 */ 834 save_state(wqe, qp, &rollback_wqe, &rollback_psn); 835 update_wqe_state(qp, wqe, &pkt); 836 update_wqe_psn(qp, wqe, &pkt, payload); 837 838 err = rxe_xmit_packet(qp, &pkt, skb); 839 if (err) { 840 qp->need_req_skb = 1; 841 842 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); 843 844 if (err == -EAGAIN) { 845 rxe_sched_task(&qp->req.task); 846 goto exit; 847 } 848 849 wqe->status = IB_WC_LOC_QP_OP_ERR; 850 goto err; 851 } 852 853 update_state(qp, &pkt); 854 855 /* A non-zero return value will cause rxe_do_task to 856 * exit its loop and end the tasklet. A zero return 857 * will continue looping and return to rxe_requester 858 */ 859 done: 860 ret = 0; 861 goto out; 862 err: 863 /* update wqe_index for each wqe completion */ 864 qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); 865 wqe->state = wqe_state_error; 866 rxe_qp_error(qp); 867 exit: 868 ret = -EAGAIN; 869 out: 870 return ret; 871 } 872