// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
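
/* Illustrative sketch (not part of the driver): a ULP-side
 * ib_qp_init_attr that would pass the checks above, assuming a
 * device that reports max_qp_wr >= 64, max_send_sge >= 4 and
 * max_recv_sge >= 4:
 *
 *	struct ib_qp_init_attr init = {
 *		.qp_type = IB_QPT_RC,
 *		.send_cq = scq,		// both CQs are mandatory
 *		.recv_cq = rcq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 4,
 *			.max_recv_sge = 4,
 *		},
 *	};
 */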

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1; /* IB_MTU_256 */
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
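
	/* Worked example of the mapping above (illustrative): hash_32()
	 * with 14 bits yields a value in [0, 0x3fff], so adding it to
	 * RXE_ROCE_V2_SPORT (0xc000) always lands the source port in
	 * the dynamic range 0xc000..0xffff (49152..65535).
	 */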
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);
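
	/* Illustrative arithmetic for the folding above, assuming
	 * sizeof(struct ib_sge) == 16: with max_send_sge = 4 and
	 * max_inline_data = 96, wqe_size = max(4 * 16, 96) = 96, so
	 * max_sge is raised to 96 / 16 = 6 and max_inline to 96 --
	 * inline data and the SGE array share the same per-WQE space.
	 */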

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				      wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}
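
	/* For reference: IB_MTU_256..IB_MTU_4096 are the enum values
	 * 1..5 and ib_mtu_enum_to_int() maps them to 256..4096 bytes,
	 * so comparing the enum values directly orders MTUs correctly.
	 */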

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}
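
	/* Illustrative example: a requested max_rd_atomic of 5 is
	 * rounded up to the next power of two, 8; the same value seeds
	 * the requester's rd_atomic credit counter, which tracks
	 * outstanding RDMA read/atomic operations.
	 */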

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}
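
	/* Worked example for the conversion above: attr->timeout = 14
	 * gives 4096 ns << 14 = ~67.1 ms, i.e. roughly 16 jiffies at
	 * HZ=250; a rounded-down result of 0 is bumped to 1 so a
	 * nonzero timeout never disables the retransmit timer.
	 */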

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}
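
	/* Note: BTH_PSN_MASK is 0xffffff -- PSNs are 24-bit quantities
	 * carried in the BTH, so rq_psn and sq_psn above are truncated
	 * to 24 bits before use.
	 */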

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}