// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
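
/* The responder keeps a small pool of resp_res resources, sized by
 * max_dest_rd_atomic, to track inbound RDMA READ and atomic operations.
 * Each slot can cache the reply for one outstanding operation (for
 * atomics, the response skb), so a duplicate request can be answered by
 * replaying the saved response instead of re-executing it. The helpers
 * below allocate, drain and free that pool.
 */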

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK)
                kfree_skb(res->atomic.skb);
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = IB_MTU_256;	/* enum value 1, the minimum MTU */
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->elem.index;
        port = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}
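
/* Requester-side setup: create the kernel UDP socket used to transmit
 * RoCEv2 packets, size and map the send queue (shared with user space
 * via mmap when udata is present), and initialize the requester and
 * completer tasks. RC QPs additionally get the RNR-NAK and retransmit
 * timers.
 */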

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
        qp->sq.max_wr = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller.
         * The SGE array and any inline data share the same space in the
         * WQE, so size it to hold the larger of the two and report the
         * (possibly increased) limits back to the caller.
         */
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);

        type = QUEUE_TYPE_FROM_CLIENT;
        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
                                      wqe_size, type);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                qp->sq.queue = NULL;
                return err;
        }

        qp->req.wqe_index = queue_get_producer(qp->sq.queue,
                                               QUEUE_TYPE_FROM_CLIENT);

        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                type = QUEUE_TYPE_FROM_CLIENT;
                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
                                              wqe_size, type);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        qp->rq.queue = NULL;
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}
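
/* A note on object lifetimes: rxe_qp_from_init() below takes a reference
 * on the PD, both CQs and the optional SRQ before wiring them into the
 * QP, and drops them in reverse order on any failure path. On success
 * the matching puts happen in rxe_qp_do_cleanup() once the last
 * reference to the QP is dropped.
 */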

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_get(pd);
        rxe_get(rcq);
        rxe_get(scq);
        if (srq)
                rxe_get(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
        qp->sq.queue = NULL;
err1:
        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_put(srq);
        rxe_put(scq);
        rxe_put(rcq);
        rxe_put(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;
        init->port_num = 1;

        return 0;
}
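
/* The state used for validation in rxe_qp_chk_attr() is resolved in two
 * steps: cur_state comes from attr->cur_qp_state when the caller sets
 * IB_QP_CUR_STATE (otherwise from the stored qp->attr.qp_state), and
 * new_state comes from attr->qp_state when IB_QP_STATE is set (otherwise
 * the QP stays in cur_state). ib_modify_qp_is_ok() then checks that the
 * cur_state -> new_state transition is legal for this QP type and mask.
 */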

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
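
/* The reset sequence below is ordered carefully: the tasks are disabled
 * first so nothing runs concurrently, the responder (and, for QPs with a
 * send queue, the completer and requester) are each run once
 * synchronously to drain their queues while in the reset state, and the
 * tasks are re-enabled only after all per-QP state has been cleared.
 */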

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let the state machines reset themselves, draining work and
         * packet queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}
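
/* Two of the attribute conversions below deserve a note: the rd_atomic
 * limits are rounded up to a power of two (zero stays zero), and the
 * local ACK timeout is converted from its 5-bit encoding to jiffies as
 * timeout = 4.096us * 2^attr->timeout. For example, attr->timeout = 14
 * gives 4096ns << 14, roughly 67ms, and a nonzero timeout that rounds
 * down to zero jiffies is clamped to one jiffy.
 */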

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
        /* See IBA o10-2.2.3
         * An attempt to destroy a QP while attached to a mcast group
         * will fail immediately.
         */
        if (atomic_read(&qp->mcg_num)) {
                pr_debug("Attempt to destroy QP while attached to multicast group\n");
                return -EBUSY;
        }

        return 0;
}
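
/* QP teardown happens in two phases: rxe_qp_destroy() below is called
 * from the destroy verb and stops the timers and tasks, flushing any
 * pending work, while rxe_qp_do_cleanup() runs only once the last
 * reference to the QP is dropped and releases the queues, the socket and
 * the references taken in rxe_qp_from_init().
 */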

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_put(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_put(qp->scq);
        if (qp->rcq)
                rxe_put(qp->rcq);
        if (qp->pd)
                rxe_put(qp->pd);

        if (qp->resp.mr)
                rxe_put(qp->resp.mr);

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}