// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
			    cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
			    cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
				    cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
				    cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
			    cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		rxe_dbg_dev(rxe, "missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (port->qp_gsi_index) {
			rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}

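/* Initialize attributes common to all QP types. path_mtu is seeded
 * to IB_MTU_256 (enum value 1) until modify_qp supplies the real
 * path MTU. A GSI QP is the well-known QP1 of its port, so it
 * reports qp_num 1 and records its pool index in qp_gsi_index;
 * every other QP uses its pool index directly as the QP number.
 */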
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	/* inline data is carried in the space normally used by the SGE
	 * array, so size the WQE for whichever is larger and report the
	 * resulting (possibly enlarged) capacities back to the caller
	 */
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				      wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);
	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

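/* Set up the responder side of the QP. When the QP is attached to
 * an SRQ the receive queue lives in the SRQ, so only the response
 * packet list, the responder task and the initial responder state
 * are set up here; otherwise a receive work queue sized by
 * max_recv_wr/max_recv_sge is allocated and, for user QPs, mapped
 * out through do_mmap_info().
 */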
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	unsigned long flags;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

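/* Validate only the attributes selected by mask against the device
 * and port limits. The IBA encodes ack timeouts in a 5-bit field as
 * 4.096 us * 2^timeout, which is why timeout and alt_timeout are
 * bounded at 31.
 */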
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
				   attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
				   ib_mtu_enum_to_int(mtu),
				   ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
				   attr->max_rd_atomic,
				   rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
				   attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);
	rxe_disable_task(&qp->comp.task);
	rxe_disable_task(&qp->req.task);

	/* drain work and packet queues */
	rxe_requester(qp);
	rxe_completer(qp);
	rxe_responder(qp);

	if (qp->rq.queue)
		rxe_queue_reset(qp->rq.queue);
	if (qp->sq.queue)
		rxe_queue_reset(qp->sq.queue);

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);
	rxe_enable_task(&qp->comp.task);
	rxe_enable_task(&qp->req.task);
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->resp.task);
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
		       int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.sq_draining = 1;
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

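/* ib_modify_qp_is_ok() enforces the IBA state transition table for
 * this QP type and attribute mask; in addition, a QP still draining
 * its send queue in the SQD state may only be moved to ERR.
 */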
/* caller should hold qp->state_lock */
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
			  int mask)
{
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;

	cur_state = (mask & IB_QP_CUR_STATE) ?
			attr->cur_qp_state : qp->attr.qp_state;
	new_state = (mask & IB_QP_STATE) ?
			attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
		return -EINVAL;

	if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
		if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
			return -EINVAL;
	}

	return 0;
}

static const char *const qps2str[] = {
	[IB_QPS_RESET]	= "RESET",
	[IB_QPS_INIT]	= "INIT",
	[IB_QPS_RTR]	= "RTR",
	[IB_QPS_RTS]	= "RTS",
	[IB_QPS_SQD]	= "SQD",
	[IB_QPS_SQE]	= "SQE",
	[IB_QPS_ERR]	= "ERR",
};

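/* Attribute updates below are applied piecemeal under the mask.
 * rd_atomic depths are rounded up to the next power of two, and the
 * IBA ack timeout encoding is converted to jiffies: e.g. a timeout
 * of 14 gives 4096 ns << 14, roughly 67 ms, clamped to at least one
 * jiffy so a nonzero timeout never degenerates to "no timer".
 */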
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_STATE) {
		unsigned long flags;

		spin_lock_irqsave(&qp->state_lock, flags);
		err = __qp_chk_state(qp, attr, mask);
		if (!err) {
			qp->attr.qp_state = attr->qp_state;
			rxe_dbg_qp(qp, "state -> %s\n",
				   qps2str[attr->qp_state]);
		}
		spin_unlock_irqrestore(&qp->state_lock, flags);

		if (err)
			return err;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			rxe_qp_reset(qp);
			break;
		case IB_QPS_SQD:
			rxe_qp_sqd(qp, attr, mask);
			break;
		case IB_QPS_ERR:
			rxe_qp_error(qp);
			break;
		default:
			break;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
			   attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	unsigned long flags;

	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	/* Applications that get this state typically spin on it.
	 * Yield the processor
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->attr.sq_draining) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		cond_resched();
	} else {
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}

	return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

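/* Teardown runs in process context via execute_in_process_context(),
 * so it may sleep: timers are deleted first, the req/comp/resp tasks
 * are cleaned up, each state machine runs once more to flush posted
 * work, and only then are the queues, the CQ/PD/SRQ references and
 * the UDP socket released.
 */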
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->valid = 0;
	spin_unlock_irqrestore(&qp->state_lock, flags);
	qp->qp_timeout_jiffies = 0;

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	if (qp->resp.task.func)
		rxe_cleanup_task(&qp->resp.task);

	if (qp->req.task.func)
		rxe_cleanup_task(&qp->req.task);

	if (qp->comp.task.func)
		rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	rxe_requester(qp);
	rxe_completer(qp);
	rxe_responder(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}