1 /* 2 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/skbuff.h> 35 #include <linux/delay.h> 36 #include <linux/sched.h> 37 38 #include "rxe.h" 39 #include "rxe_loc.h" 40 #include "rxe_queue.h" 41 #include "rxe_task.h" 42 43 static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, 44 int has_srq) 45 { 46 if (cap->max_send_wr > rxe->attr.max_qp_wr) { 47 pr_warn("invalid send wr = %d > %d\n", 48 cap->max_send_wr, rxe->attr.max_qp_wr); 49 goto err1; 50 } 51 52 if (cap->max_send_sge > rxe->attr.max_send_sge) { 53 pr_warn("invalid send sge = %d > %d\n", 54 cap->max_send_sge, rxe->attr.max_send_sge); 55 goto err1; 56 } 57 58 if (!has_srq) { 59 if (cap->max_recv_wr > rxe->attr.max_qp_wr) { 60 pr_warn("invalid recv wr = %d > %d\n", 61 cap->max_recv_wr, rxe->attr.max_qp_wr); 62 goto err1; 63 } 64 65 if (cap->max_recv_sge > rxe->attr.max_recv_sge) { 66 pr_warn("invalid recv sge = %d > %d\n", 67 cap->max_recv_sge, rxe->attr.max_recv_sge); 68 goto err1; 69 } 70 } 71 72 if (cap->max_inline_data > rxe->max_inline_data) { 73 pr_warn("invalid max inline data = %d > %d\n", 74 cap->max_inline_data, rxe->max_inline_data); 75 goto err1; 76 } 77 78 return 0; 79 80 err1: 81 return -EINVAL; 82 } 83 84 int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) 85 { 86 struct ib_qp_cap *cap = &init->cap; 87 struct rxe_port *port; 88 int port_num = init->port_num; 89 90 if (!init->recv_cq || !init->send_cq) { 91 pr_warn("missing cq\n"); 92 goto err1; 93 } 94 95 if (rxe_qp_chk_cap(rxe, cap, !!init->srq)) 96 goto err1; 97 98 if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) { 99 if (port_num != 1) { 100 pr_warn("invalid port = %d\n", port_num); 101 goto err1; 102 } 103 104 port = &rxe->port; 105 106 if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) { 107 pr_warn("SMI QP exists for port %d\n", port_num); 108 goto err1; 109 } 110 111 if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) { 112 pr_warn("GSI QP exists for port %d\n", port_num); 113 goto err1; 114 } 115 } 116 117 return 0; 118 119 err1: 120 return -EINVAL; 121 } 122 123 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) 124 { 125 qp->resp.res_head = 0; 126 qp->resp.res_tail = 0; 127 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); 128 129 if (!qp->resp.resources) 130 return -ENOMEM; 131 132 return 0; 133 } 134 135 static void free_rd_atomic_resources(struct rxe_qp *qp) 136 { 137 if (qp->resp.resources) { 138 int i; 139 140 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { 141 struct resp_res *res = &qp->resp.resources[i]; 142 143 free_rd_atomic_resource(qp, res); 144 } 145 kfree(qp->resp.resources); 146 qp->resp.resources = NULL; 147 } 148 } 149 150 void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res) 151 { 152 if (res->type == RXE_ATOMIC_MASK) { 153 rxe_drop_ref(qp); 154 kfree_skb(res->atomic.skb); 155 } else if (res->type == RXE_READ_MASK) { 156 if (res->read.mr) 157 rxe_drop_ref(res->read.mr); 158 } 159 res->type = 0; 160 } 161 162 static void cleanup_rd_atomic_resources(struct rxe_qp *qp) 163 { 164 int i; 165 struct resp_res *res; 166 167 if (qp->resp.resources) { 168 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { 169 res = &qp->resp.resources[i]; 170 free_rd_atomic_resource(qp, res); 171 } 172 } 173 } 174 175 static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, 176 struct ib_qp_init_attr *init) 177 { 178 struct rxe_port *port; 179 u32 qpn; 180 181 qp->sq_sig_type = init->sq_sig_type; 182 qp->attr.path_mtu = 1; 183 qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); 184 185 qpn = qp->pelem.index; 186 port = &rxe->port; 187 188 switch (init->qp_type) { 189 case IB_QPT_SMI: 190 qp->ibqp.qp_num = 0; 191 port->qp_smi_index = qpn; 192 qp->attr.port_num = init->port_num; 193 break; 194 195 case IB_QPT_GSI: 196 qp->ibqp.qp_num = 1; 197 port->qp_gsi_index = qpn; 198 qp->attr.port_num = init->port_num; 199 break; 200 201 default: 202 qp->ibqp.qp_num = qpn; 203 break; 204 } 205 206 INIT_LIST_HEAD(&qp->grp_list); 207 208 skb_queue_head_init(&qp->send_pkts); 209 210 spin_lock_init(&qp->grp_lock); 211 spin_lock_init(&qp->state_lock); 212 213 atomic_set(&qp->ssn, 0); 214 atomic_set(&qp->skb_out, 0); 215 } 216 217 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, 218 struct ib_qp_init_attr *init, 219 struct ib_ucontext *context, 220 struct rxe_create_qp_resp __user *uresp) 221 { 222 int err; 223 int wqe_size; 224 225 err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk); 226 if (err < 0) 227 return err; 228 qp->sk->sk->sk_user_data = qp; 229 230 qp->sq.max_wr = init->cap.max_send_wr; 231 qp->sq.max_sge = init->cap.max_send_sge; 232 qp->sq.max_inline = init->cap.max_inline_data; 233 234 wqe_size = max_t(int, sizeof(struct rxe_send_wqe) + 235 qp->sq.max_sge * sizeof(struct ib_sge), 236 sizeof(struct rxe_send_wqe) + 237 qp->sq.max_inline); 238 239 qp->sq.queue = rxe_queue_init(rxe, 240 &qp->sq.max_wr, 241 wqe_size); 242 if (!qp->sq.queue) 243 return -ENOMEM; 244 245 err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context, 246 qp->sq.queue->buf, qp->sq.queue->buf_size, 247 &qp->sq.queue->ip); 248 249 if (err) { 250 kvfree(qp->sq.queue->buf); 251 kfree(qp->sq.queue); 252 return err; 253 } 254 255 qp->req.wqe_index = producer_index(qp->sq.queue); 256 qp->req.state = QP_STATE_RESET; 257 qp->req.opcode = -1; 258 qp->comp.opcode = -1; 259 260 spin_lock_init(&qp->sq.sq_lock); 261 skb_queue_head_init(&qp->req_pkts); 262 263 rxe_init_task(rxe, &qp->req.task, qp, 264 rxe_requester, "req"); 265 rxe_init_task(rxe, &qp->comp.task, qp, 266 rxe_completer, "comp"); 267 268 qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ 269 if (init->qp_type == IB_QPT_RC) { 270 timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0); 271 timer_setup(&qp->retrans_timer, retransmit_timer, 0); 272 } 273 return 0; 274 } 275 276 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, 277 struct ib_qp_init_attr *init, 278 struct ib_ucontext *context, 279 struct rxe_create_qp_resp __user *uresp) 280 { 281 int err; 282 int wqe_size; 283 284 if (!qp->srq) { 285 qp->rq.max_wr = init->cap.max_recv_wr; 286 qp->rq.max_sge = init->cap.max_recv_sge; 287 288 wqe_size = rcv_wqe_size(qp->rq.max_sge); 289 290 pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n", 291 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size); 292 293 qp->rq.queue = rxe_queue_init(rxe, 294 &qp->rq.max_wr, 295 wqe_size); 296 if (!qp->rq.queue) 297 return -ENOMEM; 298 299 err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context, 300 qp->rq.queue->buf, qp->rq.queue->buf_size, 301 &qp->rq.queue->ip); 302 if (err) { 303 kvfree(qp->rq.queue->buf); 304 kfree(qp->rq.queue); 305 return err; 306 } 307 } 308 309 spin_lock_init(&qp->rq.producer_lock); 310 spin_lock_init(&qp->rq.consumer_lock); 311 312 skb_queue_head_init(&qp->resp_pkts); 313 314 rxe_init_task(rxe, &qp->resp.task, qp, 315 rxe_responder, "resp"); 316 317 qp->resp.opcode = OPCODE_NONE; 318 qp->resp.msn = 0; 319 qp->resp.state = QP_STATE_RESET; 320 321 return 0; 322 } 323 324 /* called by the create qp verb */ 325 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, 326 struct ib_qp_init_attr *init, 327 struct rxe_create_qp_resp __user *uresp, 328 struct ib_pd *ibpd) 329 { 330 int err; 331 struct rxe_cq *rcq = to_rcq(init->recv_cq); 332 struct rxe_cq *scq = to_rcq(init->send_cq); 333 struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL; 334 struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL; 335 336 rxe_add_ref(pd); 337 rxe_add_ref(rcq); 338 rxe_add_ref(scq); 339 if (srq) 340 rxe_add_ref(srq); 341 342 qp->pd = pd; 343 qp->rcq = rcq; 344 qp->scq = scq; 345 qp->srq = srq; 346 347 rxe_qp_init_misc(rxe, qp, init); 348 349 err = rxe_qp_init_req(rxe, qp, init, context, uresp); 350 if (err) 351 goto err1; 352 353 err = rxe_qp_init_resp(rxe, qp, init, context, uresp); 354 if (err) 355 goto err2; 356 357 qp->attr.qp_state = IB_QPS_RESET; 358 qp->valid = 1; 359 360 return 0; 361 362 err2: 363 rxe_queue_cleanup(qp->sq.queue); 364 err1: 365 if (srq) 366 rxe_drop_ref(srq); 367 rxe_drop_ref(scq); 368 rxe_drop_ref(rcq); 369 rxe_drop_ref(pd); 370 371 return err; 372 } 373 374 /* called by the query qp verb */ 375 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init) 376 { 377 init->event_handler = qp->ibqp.event_handler; 378 init->qp_context = qp->ibqp.qp_context; 379 init->send_cq = qp->ibqp.send_cq; 380 init->recv_cq = qp->ibqp.recv_cq; 381 init->srq = qp->ibqp.srq; 382 383 init->cap.max_send_wr = qp->sq.max_wr; 384 init->cap.max_send_sge = qp->sq.max_sge; 385 init->cap.max_inline_data = qp->sq.max_inline; 386 387 if (!qp->srq) { 388 init->cap.max_recv_wr = qp->rq.max_wr; 389 init->cap.max_recv_sge = qp->rq.max_sge; 390 } 391 392 init->sq_sig_type = qp->sq_sig_type; 393 394 init->qp_type = qp->ibqp.qp_type; 395 init->port_num = 1; 396 397 return 0; 398 } 399 400 /* called by the modify qp verb, this routine checks all the parameters before 401 * making any changes 402 */ 403 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, 404 struct ib_qp_attr *attr, int mask) 405 { 406 enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ? 407 attr->cur_qp_state : qp->attr.qp_state; 408 enum ib_qp_state new_state = (mask & IB_QP_STATE) ? 409 attr->qp_state : cur_state; 410 411 if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask, 412 IB_LINK_LAYER_ETHERNET)) { 413 pr_warn("invalid mask or state for qp\n"); 414 goto err1; 415 } 416 417 if (mask & IB_QP_STATE) { 418 if (cur_state == IB_QPS_SQD) { 419 if (qp->req.state == QP_STATE_DRAIN && 420 new_state != IB_QPS_ERR) 421 goto err1; 422 } 423 } 424 425 if (mask & IB_QP_PORT) { 426 if (attr->port_num != 1) { 427 pr_warn("invalid port %d\n", attr->port_num); 428 goto err1; 429 } 430 } 431 432 if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq)) 433 goto err1; 434 435 if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr)) 436 goto err1; 437 438 if (mask & IB_QP_ALT_PATH) { 439 if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr)) 440 goto err1; 441 if (attr->alt_port_num != 1) { 442 pr_warn("invalid alt port %d\n", attr->alt_port_num); 443 goto err1; 444 } 445 if (attr->alt_timeout > 31) { 446 pr_warn("invalid QP alt timeout %d > 31\n", 447 attr->alt_timeout); 448 goto err1; 449 } 450 } 451 452 if (mask & IB_QP_PATH_MTU) { 453 struct rxe_port *port = &rxe->port; 454 455 enum ib_mtu max_mtu = port->attr.max_mtu; 456 enum ib_mtu mtu = attr->path_mtu; 457 458 if (mtu > max_mtu) { 459 pr_debug("invalid mtu (%d) > (%d)\n", 460 ib_mtu_enum_to_int(mtu), 461 ib_mtu_enum_to_int(max_mtu)); 462 goto err1; 463 } 464 } 465 466 if (mask & IB_QP_MAX_QP_RD_ATOMIC) { 467 if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) { 468 pr_warn("invalid max_rd_atomic %d > %d\n", 469 attr->max_rd_atomic, 470 rxe->attr.max_qp_rd_atom); 471 goto err1; 472 } 473 } 474 475 if (mask & IB_QP_TIMEOUT) { 476 if (attr->timeout > 31) { 477 pr_warn("invalid QP timeout %d > 31\n", 478 attr->timeout); 479 goto err1; 480 } 481 } 482 483 return 0; 484 485 err1: 486 return -EINVAL; 487 } 488 489 /* move the qp to the reset state */ 490 static void rxe_qp_reset(struct rxe_qp *qp) 491 { 492 /* stop tasks from running */ 493 rxe_disable_task(&qp->resp.task); 494 495 /* stop request/comp */ 496 if (qp->sq.queue) { 497 if (qp_type(qp) == IB_QPT_RC) 498 rxe_disable_task(&qp->comp.task); 499 rxe_disable_task(&qp->req.task); 500 } 501 502 /* move qp to the reset state */ 503 qp->req.state = QP_STATE_RESET; 504 qp->resp.state = QP_STATE_RESET; 505 506 /* let state machines reset themselves drain work and packet queues 507 * etc. 508 */ 509 __rxe_do_task(&qp->resp.task); 510 511 if (qp->sq.queue) { 512 __rxe_do_task(&qp->comp.task); 513 __rxe_do_task(&qp->req.task); 514 rxe_queue_reset(qp->sq.queue); 515 } 516 517 /* cleanup attributes */ 518 atomic_set(&qp->ssn, 0); 519 qp->req.opcode = -1; 520 qp->req.need_retry = 0; 521 qp->req.noack_pkts = 0; 522 qp->resp.msn = 0; 523 qp->resp.opcode = -1; 524 qp->resp.drop_msg = 0; 525 qp->resp.goto_error = 0; 526 qp->resp.sent_psn_nak = 0; 527 528 if (qp->resp.mr) { 529 rxe_drop_ref(qp->resp.mr); 530 qp->resp.mr = NULL; 531 } 532 533 cleanup_rd_atomic_resources(qp); 534 535 /* reenable tasks */ 536 rxe_enable_task(&qp->resp.task); 537 538 if (qp->sq.queue) { 539 if (qp_type(qp) == IB_QPT_RC) 540 rxe_enable_task(&qp->comp.task); 541 542 rxe_enable_task(&qp->req.task); 543 } 544 } 545 546 /* drain the send queue */ 547 static void rxe_qp_drain(struct rxe_qp *qp) 548 { 549 if (qp->sq.queue) { 550 if (qp->req.state != QP_STATE_DRAINED) { 551 qp->req.state = QP_STATE_DRAIN; 552 if (qp_type(qp) == IB_QPT_RC) 553 rxe_run_task(&qp->comp.task, 1); 554 else 555 __rxe_do_task(&qp->comp.task); 556 rxe_run_task(&qp->req.task, 1); 557 } 558 } 559 } 560 561 /* move the qp to the error state */ 562 void rxe_qp_error(struct rxe_qp *qp) 563 { 564 qp->req.state = QP_STATE_ERROR; 565 qp->resp.state = QP_STATE_ERROR; 566 qp->attr.qp_state = IB_QPS_ERR; 567 568 /* drain work and packet queues */ 569 rxe_run_task(&qp->resp.task, 1); 570 571 if (qp_type(qp) == IB_QPT_RC) 572 rxe_run_task(&qp->comp.task, 1); 573 else 574 __rxe_do_task(&qp->comp.task); 575 rxe_run_task(&qp->req.task, 1); 576 } 577 578 /* called by the modify qp verb */ 579 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, 580 struct ib_udata *udata) 581 { 582 int err; 583 584 if (mask & IB_QP_MAX_QP_RD_ATOMIC) { 585 int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); 586 587 qp->attr.max_rd_atomic = max_rd_atomic; 588 atomic_set(&qp->req.rd_atomic, max_rd_atomic); 589 } 590 591 if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { 592 int max_dest_rd_atomic = 593 __roundup_pow_of_two(attr->max_dest_rd_atomic); 594 595 qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; 596 597 free_rd_atomic_resources(qp); 598 599 err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic); 600 if (err) 601 return err; 602 } 603 604 if (mask & IB_QP_CUR_STATE) 605 qp->attr.cur_qp_state = attr->qp_state; 606 607 if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY) 608 qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify; 609 610 if (mask & IB_QP_ACCESS_FLAGS) 611 qp->attr.qp_access_flags = attr->qp_access_flags; 612 613 if (mask & IB_QP_PKEY_INDEX) 614 qp->attr.pkey_index = attr->pkey_index; 615 616 if (mask & IB_QP_PORT) 617 qp->attr.port_num = attr->port_num; 618 619 if (mask & IB_QP_QKEY) 620 qp->attr.qkey = attr->qkey; 621 622 if (mask & IB_QP_AV) { 623 rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr); 624 rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr); 625 } 626 627 if (mask & IB_QP_ALT_PATH) { 628 rxe_av_from_attr(attr->alt_port_num, &qp->alt_av, 629 &attr->alt_ah_attr); 630 rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr); 631 qp->attr.alt_port_num = attr->alt_port_num; 632 qp->attr.alt_pkey_index = attr->alt_pkey_index; 633 qp->attr.alt_timeout = attr->alt_timeout; 634 } 635 636 if (mask & IB_QP_PATH_MTU) { 637 qp->attr.path_mtu = attr->path_mtu; 638 qp->mtu = ib_mtu_enum_to_int(attr->path_mtu); 639 } 640 641 if (mask & IB_QP_TIMEOUT) { 642 qp->attr.timeout = attr->timeout; 643 if (attr->timeout == 0) { 644 qp->qp_timeout_jiffies = 0; 645 } else { 646 /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */ 647 int j = nsecs_to_jiffies(4096ULL << attr->timeout); 648 649 qp->qp_timeout_jiffies = j ? j : 1; 650 } 651 } 652 653 if (mask & IB_QP_RETRY_CNT) { 654 qp->attr.retry_cnt = attr->retry_cnt; 655 qp->comp.retry_cnt = attr->retry_cnt; 656 pr_debug("qp#%d set retry count = %d\n", qp_num(qp), 657 attr->retry_cnt); 658 } 659 660 if (mask & IB_QP_RNR_RETRY) { 661 qp->attr.rnr_retry = attr->rnr_retry; 662 qp->comp.rnr_retry = attr->rnr_retry; 663 pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp), 664 attr->rnr_retry); 665 } 666 667 if (mask & IB_QP_RQ_PSN) { 668 qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK); 669 qp->resp.psn = qp->attr.rq_psn; 670 pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp), 671 qp->resp.psn); 672 } 673 674 if (mask & IB_QP_MIN_RNR_TIMER) { 675 qp->attr.min_rnr_timer = attr->min_rnr_timer; 676 pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp), 677 attr->min_rnr_timer); 678 } 679 680 if (mask & IB_QP_SQ_PSN) { 681 qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK); 682 qp->req.psn = qp->attr.sq_psn; 683 qp->comp.psn = qp->attr.sq_psn; 684 pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn); 685 } 686 687 if (mask & IB_QP_PATH_MIG_STATE) 688 qp->attr.path_mig_state = attr->path_mig_state; 689 690 if (mask & IB_QP_DEST_QPN) 691 qp->attr.dest_qp_num = attr->dest_qp_num; 692 693 if (mask & IB_QP_STATE) { 694 qp->attr.qp_state = attr->qp_state; 695 696 switch (attr->qp_state) { 697 case IB_QPS_RESET: 698 pr_debug("qp#%d state -> RESET\n", qp_num(qp)); 699 rxe_qp_reset(qp); 700 break; 701 702 case IB_QPS_INIT: 703 pr_debug("qp#%d state -> INIT\n", qp_num(qp)); 704 qp->req.state = QP_STATE_INIT; 705 qp->resp.state = QP_STATE_INIT; 706 break; 707 708 case IB_QPS_RTR: 709 pr_debug("qp#%d state -> RTR\n", qp_num(qp)); 710 qp->resp.state = QP_STATE_READY; 711 break; 712 713 case IB_QPS_RTS: 714 pr_debug("qp#%d state -> RTS\n", qp_num(qp)); 715 qp->req.state = QP_STATE_READY; 716 break; 717 718 case IB_QPS_SQD: 719 pr_debug("qp#%d state -> SQD\n", qp_num(qp)); 720 rxe_qp_drain(qp); 721 break; 722 723 case IB_QPS_SQE: 724 pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp)); 725 /* Not possible from modify_qp. */ 726 break; 727 728 case IB_QPS_ERR: 729 pr_debug("qp#%d state -> ERR\n", qp_num(qp)); 730 rxe_qp_error(qp); 731 break; 732 } 733 } 734 735 return 0; 736 } 737 738 /* called by the query qp verb */ 739 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) 740 { 741 *attr = qp->attr; 742 743 attr->rq_psn = qp->resp.psn; 744 attr->sq_psn = qp->req.psn; 745 746 attr->cap.max_send_wr = qp->sq.max_wr; 747 attr->cap.max_send_sge = qp->sq.max_sge; 748 attr->cap.max_inline_data = qp->sq.max_inline; 749 750 if (!qp->srq) { 751 attr->cap.max_recv_wr = qp->rq.max_wr; 752 attr->cap.max_recv_sge = qp->rq.max_sge; 753 } 754 755 rxe_av_to_attr(&qp->pri_av, &attr->ah_attr); 756 rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr); 757 758 if (qp->req.state == QP_STATE_DRAIN) { 759 attr->sq_draining = 1; 760 /* applications that get this state 761 * typically spin on it. yield the 762 * processor 763 */ 764 cond_resched(); 765 } else { 766 attr->sq_draining = 0; 767 } 768 769 pr_debug("attr->sq_draining = %d\n", attr->sq_draining); 770 771 return 0; 772 } 773 774 /* called by the destroy qp verb */ 775 void rxe_qp_destroy(struct rxe_qp *qp) 776 { 777 qp->valid = 0; 778 qp->qp_timeout_jiffies = 0; 779 rxe_cleanup_task(&qp->resp.task); 780 781 if (qp_type(qp) == IB_QPT_RC) { 782 del_timer_sync(&qp->retrans_timer); 783 del_timer_sync(&qp->rnr_nak_timer); 784 } 785 786 rxe_cleanup_task(&qp->req.task); 787 rxe_cleanup_task(&qp->comp.task); 788 789 /* flush out any receive wr's or pending requests */ 790 __rxe_do_task(&qp->req.task); 791 if (qp->sq.queue) { 792 __rxe_do_task(&qp->comp.task); 793 __rxe_do_task(&qp->req.task); 794 } 795 } 796 797 /* called when the last reference to the qp is dropped */ 798 static void rxe_qp_do_cleanup(struct work_struct *work) 799 { 800 struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); 801 802 rxe_drop_all_mcast_groups(qp); 803 804 if (qp->sq.queue) 805 rxe_queue_cleanup(qp->sq.queue); 806 807 if (qp->srq) 808 rxe_drop_ref(qp->srq); 809 810 if (qp->rq.queue) 811 rxe_queue_cleanup(qp->rq.queue); 812 813 if (qp->scq) 814 rxe_drop_ref(qp->scq); 815 if (qp->rcq) 816 rxe_drop_ref(qp->rcq); 817 if (qp->pd) 818 rxe_drop_ref(qp->pd); 819 820 if (qp->resp.mr) { 821 rxe_drop_ref(qp->resp.mr); 822 qp->resp.mr = NULL; 823 } 824 825 if (qp_type(qp) == IB_QPT_RC) 826 sk_dst_reset(qp->sk->sk); 827 828 free_rd_atomic_resources(qp); 829 830 kernel_sock_shutdown(qp->sk, SHUT_RDWR); 831 sock_release(qp->sk); 832 } 833 834 /* called when the last reference to the qp is dropped */ 835 void rxe_qp_cleanup(struct rxe_pool_entry *arg) 836 { 837 struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); 838 839 execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); 840 } 841