// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                rxe_drop_ref(qp);
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}
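
/*
 * Like free_rd_atomic_resources() above, but the resources array itself
 * stays allocated. This variant is used when the QP is moved back to the
 * RESET state (see rxe_qp_reset() below) rather than destroyed, so only
 * the per-slot references (response skbs, MRs) are released.
 */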
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = 1;
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->pelem.index;
        port = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT +
                (hash_32_generic(qp_num(qp), 14) & 0x3fff);
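        /*
         * For example, assuming RXE_ROCE_V2_SPORT is 0xc000: a QPN whose
         * 14-bit hash comes out as 0x012a yields src_port 0xc12a. The
         * 0x3fff mask limits the offset to 14 bits, so the result always
         * stays inside the 0xc000 - 0xffff dynamic range.
         */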
        qp->sq.max_wr = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);
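
        /*
         * The sizing above works because inline data reuses the SGE array
         * space inside the send WQE, so both caps derive from one byte
         * count. For example, with max_send_sge = 4, max_inline_data = 0,
         * and assuming a 16-byte struct ib_sge, wqe_size starts at 64, so
         * max_inline is raised to 64 as a side effect.
         */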

        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                return err;
        }

        qp->req.wqe_index = producer_index(qp->sq.queue);
        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp, rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp, rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp, rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
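/*
 * Note: the references taken on pd, rcq, scq and srq below are held for
 * the lifetime of the QP; they are dropped either on the error paths
 * here or in rxe_qp_do_cleanup() once the last QP reference is gone.
 */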
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
        rxe_drop_ref(rcq);
        rxe_drop_ref(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;
        init->port_num = 1;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                     attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                     attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n", attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
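
/*
 * Note on ordering in the reset path below: the responder (and, for RC,
 * the completer) and requester tasks are disabled first so no task can
 * observe partially cleared state; each task is then run once
 * synchronously to drain its queues before the tasks are re-enabled.
 */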
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let the state machines reset themselves, drain the work and
         * packet queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}
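
/*
 * The DRAIN -> DRAINED transition itself is completed by the completer
 * task (rxe_completer(), see rxe_comp.c) once the send queue has
 * drained, at which point the IB_EVENT_SQ_DRAINED event is reported to
 * any registered event handler.
 */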
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }
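
        /*
         * Worked example for the conversion below: attr->timeout = 14
         * gives 4096ns << 14 = ~67ms; the "j ? j : 1" fallback keeps a
         * nonzero timeout from rounding down to zero jiffies.
         */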
        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}