/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}
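/*
 * req_retry() rewinds the requester to the oldest WQE that the
 * completer has not fully acknowledged and replays from qp->comp.psn.
 * Each unfinished WQE has its DMA state reset to the start; for the
 * first (partially acked) WQE the already acked packets are then
 * skipped again, via retry_first_write_send() for sends and writes or
 * by advancing iova for an RDMA read.
 */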
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	qp->req.wqe_index = consumer_index(qp->sq.queue);
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = consumer_index(qp->sq.queue);
	     wqe_index != producer_index(qp->sq.queue);
	     wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				     consumer_index(qp->sq.queue)) ||
				    (wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}
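/*
 * Map a work request opcode to the RC wire opcode of the next packet,
 * chaining FIRST/MIDDLE/LAST/ONLY variants based on the previous
 * packet's opcode and on whether the remaining payload fits in one
 * MTU. For example, a send spanning three MTU-sized packets emits
 * SEND_FIRST, SEND_MIDDLE, SEND_LAST; one that fits in a single
 * packet emits SEND_ONLY.
 */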
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}
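/*
 * Select the wire opcode for the next packet of a WQE. "fits" is true
 * when the remaining payload fits in a single MTU, which decides
 * between the ONLY/LAST and FIRST/MIDDLE variants for RC and UC.
 * UD-style QPs (SMI, UD, GSI) only ever emit single-packet sends.
 */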
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}
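/*
 * Build the skb and headers for one request packet. paylen counts
 * everything from the start of the BTH to the end of the ICRC, and
 * pad = (-payload) & 0x3 rounds the payload up to a multiple of four
 * bytes as required by the BTH pad count; e.g. a 7-byte payload gets
 * one pad byte.
 */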
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	struct rxe_av *av;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u16 pkey;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode = opcode;
	pkt->qp = qp;
	pkt->psn = qp->req.psn;
	pkt->mask = rxe_opcode[opcode].mask;
	pkt->paylen = paylen;
	pkt->offset = 0;
	pkt->wqe = wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			 (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			 (RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
			crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}
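/*
 * Advance the PSN state after building a packet. num_pkt is the
 * number of packets still to be sent for this WQE, counting the
 * current one. For example, with a 4096-byte MTU, a 10000-byte write
 * starting at req.psn == 100 has num_pkt = 3 on its first packet, so
 * first_psn = 100 and last_psn = 102. An RDMA read consumes its whole
 * PSN range up front, since the responder returns one packet per PSN.
 */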
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
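/*
 * Main requester work routine, run from the QP's request task. Each
 * pass pulls the next WQE from the send queue, picks a wire opcode,
 * builds and transmits one packet, then loops until the queue is
 * empty or the QP must wait (fence, PSN window, rd_atomic credit or
 * skb backpressure). WQE state and PSN are saved before transmit and
 * rolled back on failure so the completer never sees a half-updated
 * WQE.
 */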
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			rmr->iova = wqe->wr.wr.reg.mr->iova;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
			rxe_run_task(&qp->comp.task, 1);
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer
			 * lengths specified for a UD message exceeds the MTU
			 * of the port as returned by QueryHCA, the CI shall
			 * not emit any packets for this message. Further, the
			 * CI shall not generate an error due to this
			 * condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}