// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}
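
/* Note on the responder resources above: resp.resources[] is sized by
 * max_dest_rd_atomic and recycled through res_head/res_tail.  A slot of
 * type RXE_ATOMIC_MASK pins the original ack skb (and a qp reference) so
 * a duplicate atomic request can be answered by resending it; a slot of
 * type RXE_READ_MASK holds a reference on the MR being read so a retried
 * RDMA READ can be replayed.
 */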

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
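	/* For example, a QP whose QPN hashes to 0x0123 always sends from
	 * UDP source port 0xc000 + 0x0123 = 0xc123; every QP lands
	 * somewhere in 0xc000-0xffff and keeps that port for its lifetime.
	 */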
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}
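
/* For reference, a minimal user-space sketch (illustrative, not part of
 * this file) of the verbs call that reaches rxe_qp_from_init() below;
 * scq/rcq/pd are handles created earlier via ibv_create_cq() and
 * ibv_alloc_pd().  The cap values requested here are what
 * rxe_qp_chk_cap() validates and rxe_qp_init_req()/rxe_qp_init_resp()
 * round up:
 *
 *	struct ibv_qp_init_attr attr = {
 *		.send_cq = scq, .recv_cq = rcq,
 *		.qp_type = IBV_QPT_RC,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 4, .max_recv_sge = 4 },
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
 */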

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}
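
/* The usual consumer-driven life cycle is RESET -> INIT -> RTR -> RTS,
 * with SQD, SQE and ERR as the exceptional states; ib_modify_qp_is_ok()
 * below rejects any state transition or attribute mask that the IBA
 * spec does not allow for this QP type.
 */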

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}
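
	/* Per the IBA spec the timeout field is a 5-bit exponent: the
	 * local ack timeout is 4.096 usec * 2^timeout, so the legal
	 * values 0..31 run from "no timeout" (0) to roughly 2.4 hours
	 * (31); e.g. timeout = 14 is about 67 msec.  The conversion to
	 * jiffies happens in rxe_qp_from_attr() below.
	 */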

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}
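
	/* roundup_pow_of_two() only rounds up: a request for 5
	 * outstanding READ/atomic operations becomes 8, 1 stays 1, and 0
	 * (special-cased here and below) disables the feature.
	 */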

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->cur_qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;
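
	/* A state change fans out to rxe's separate requester and
	 * responder state machines: RTR readies only the responder
	 * (qp->resp.state) and RTS only the requester (qp->req.state),
	 * which is why a QP must pass through RTR on its way to RTS to
	 * become fully operational.
	 */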

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it; yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}