/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;	/* IB_MTU_256 */
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = ibpd->uobject ?
		ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet queues,
	 * etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}