// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u32 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u32 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u32 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibuc->device);
	struct rxe_ucontext *uc = to_ruc(ibuc);

	return rxe_add_to_pool(&rxe->uc_pool, uc);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
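
/*
 * Note on allocation: for the AH, CQ, PD, SRQ and ucontext objects the
 * ib_core allocates the containing rxe_* structure up front, sized by
 * the INIT_RDMA_OBJ_SIZE() entries at the bottom of rxe_dev_ops, so the
 * verbs below only need to register the object with the matching pool.
 */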

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, pd);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, ah);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
	return 0;
}

static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (init->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, srq);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}
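
/*
 * When called from user space, udata must carry a struct
 * rxe_modify_srq_cmd, which is handed to rxe_srq_from_attr() when the
 * receive queue is resized; kernel callers pass udata == NULL and the
 * zero-initialized ucmd is used instead.
 */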

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (init->create_flags)
		return ERR_PTR(-EOPNOTSUPP);

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}
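
/*
 * Tear-down mirrors rxe_create_qp(): rxe_qp_destroy() shuts the QP down
 * before the index and the final pool reference are dropped.
 */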

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
				    const struct ib_send_wr *ibwr)
{
	struct ib_sge *sge = ibwr->sg_list;
	u8 *p = wqe->dma.inline_data;
	int i;

	for (i = 0; i < ibwr->num_sge; i++, sge++) {
		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
		p += sge->length;
	}
}
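
/*
 * init_send_wqe() below fills the driver-private WQE: wqe->iova is the
 * remote address for atomics and RDMA read/write and 0 for plain sends;
 * inline data is copied into the WQE itself, otherwise the SGE list is
 * copied so the requester task can walk it asynchronously.
 */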

static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			  unsigned int mask, unsigned int length,
			  struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;

	init_send_wr(qp, &wqe->wr, ibwr);

	/* local operation */
	if (unlikely(mask & WR_REG_MASK)) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return;
	}

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
		copy_inline_data_to_wqe(wqe, ibwr);
	else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);
}

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);
	init_send_wqe(qp, ibwr, mask, length, send_wqe);

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}
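
/*
 * As with SRQs and QPs above, a user-space caller must leave room in
 * udata->outbuf for a struct rxe_create_cq_resp, through which
 * rxe_cq_from_init() returns the mmap information for the completion
 * queue buffer.
 */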

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EOPNOTSUPP;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, cq);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}
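
/*
 * Completion notification state: once armed with IB_CQ_NEXT_COMP the CQ
 * stays armed until the event fires; a later IB_CQ_SOLICITED request
 * cannot downgrade it. rxe_req_notify_cq() returns 1 when
 * IB_CQ_REPORT_MISSED_EVENTS is set and completions are already queued,
 * telling the caller to poll again.
 */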

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mr_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);

	mr->state = RXE_MR_STATE_ZOMBIE;
	rxe_drop_ref(mr_pd(mr));
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mr_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
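
/*
 * Multicast attach: rxe_mcast_get_grp() looks up or creates the group
 * for this MGID and takes a reference on it; the element added by
 * rxe_mcast_add_grp_elem() holds its own reference, so the lookup
 * reference is dropped before returning.
 */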

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.create_user_ah = rxe_create_ah,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
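
/*
 * Registration: the node GUID is derived from the underlying netdev's
 * MAC address (EUI-64 mapping) and a crc32 shash is allocated for ICRC
 * computation before the device is registered with the RDMA core.
 */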

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);

	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}
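
/*
 * Usage sketch (not part of this file): an rxe device is typically
 * created over an Ethernet interface from user space, e.g. with
 * iproute2:
 *
 *	rdma link add rxe0 type rxe netdev eth0
 *
 * which reaches rxe_register_device() with ibdev_name == "rxe0".
 */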