1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

/*
 * One lockdep class per uobject type: init_uobj() tags each object's
 * rwsem with its type's class so lockdep can distinguish, e.g., a PD's
 * lock from a CQ's lock when both are held.
 */
static struct lock_class_key pd_lock_key;
static struct lock_class_key mr_lock_key;
static struct lock_class_key cq_lock_key;
static struct lock_class_key qp_lock_key;
static struct lock_class_key ah_lock_key;
static struct lock_class_key srq_lock_key;

/* Point an ib_udata at the user-space in/out buffers of a command. */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

/* Initialize a freshly allocated uobject; live stays 0 until fully set up. */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct lock_class_key *key)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	/* Per-type lockdep class, see the *_lock_key declarations above. */
	lockdep_set_class(&uobj->mutex, key);
	uobj->live = 0;
}

/* kref release callback: frees the uobject storage itself. */
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

/* Drop one reference; frees the uobject when the last ref goes away. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

/* Release a read-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

/* Release a write-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

/*
 * Insert a uobject into an idr, storing the allocated id in uobj->id.
 * Uses the old two-step idr API: preload outside the lock, retry on
 * -EAGAIN if another allocator consumed the preloaded node.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

/* Remove a uobject's handle from an idr.  Non-static: presumably also
 * called from elsewhere in the uverbs module — confirm against uverbs.h. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

/*
 * Look up a handle and take a reference, but only if the object
 * belongs to the caller's ucontext (prevents cross-context access).
 * Returns NULL on miss or context mismatch.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

/*
 * Look up a handle and return it read-locked and referenced, or NULL.
 * "nested" selects SINGLE_DEPTH_NESTING for the one case where two
 * objects of the same type are read-locked together (send/recv CQs in
 * create_qp below).  The live check implements the scheme described in
 * the locking comment above.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

/* Like idr_read_uobj() but takes the rwsem for writing (destroy paths). */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

/* Convenience: read-lock a uobject and return its payload pointer. */
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

/* Typed read-lock helpers; each put_*_read() below undoes one of these. */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

/*
 * GET_CONTEXT: allocate the per-process ucontext and the async event
 * file.  Only one ucontext per uverbs file is allowed.  Returns in_len
 * on success (uverbs convention), negative errno on failure.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* file->mutex held across the whole setup so a racing second
	 * GET_CONTEXT sees either no ucontext or a complete one. */
	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd();
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	/* Copy the response before committing: past this point the only
	 * remaining failure is event-handler registration. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* fd_install() publishes the fd to userspace — no failure allowed
	 * after this. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

/*
 * QUERY_DEVICE: copy the device attributes into the fixed-layout
 * userspace response structure, field by field.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* memset so any padding/reserved bytes don't leak kernel stack. */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       =
attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * QUERY_PORT: copy one port's attributes to userspace.  Port number
 * validity is checked by ib_query_port().
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	/* Zero first so reserved/padding bytes don't leak to userspace. */
	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * ALLOC_PD: create a protection domain.  Follows the standard uverbs
 * create pattern: allocate uobject, write-lock it, create the verbs
 * object, insert into the idr, copy the response, link into the
 * ucontext list, then set live = 1 and unlock.  Unwinding is in exact
 * reverse order via the fall-through error labels.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
	/* Held for write until the object is fully live; readers that find
	 * the id in the idr meanwhile will block, then see live == 0. */
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_PD: destroy a protection domain.  Standard uverbs destroy
 * pattern: write-lock the uobject, destroy the verbs object, clear
 * live, then remove from idr and ucontext list and drop the final ref.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	/* Only mark dead if the driver actually destroyed it (it may fail
	 * with -EBUSY etc.); concurrent lookups then keep working. */
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * REG_MR: register a userspace memory region against a PD.  The PD is
 * read-locked across the driver call so it cannot be destroyed while
 * the MR is being created.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The user VA and the HCA VA must share the same page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	/* The MR pins the PD until deregistration. */
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

/* DEREG_MR: deregister a memory region (standard destroy pattern). */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr
= uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * CREATE_COMP_CHANNEL: create a completion event channel, returned to
 * userspace as a new file descriptor.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd();
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	/* Copy the fd out before fd_install(); once installed, userspace
	 * owns it and we could no longer unwind. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

/*
 * CREATE_CQ: create a completion queue, optionally bound to a
 * completion channel (cmd.comp_channel >= 0).  Same create pattern as
 * ALLOC_PD, with the extra ucq bookkeeping for event delivery.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
	down_write(&obj->uobject.mutex);

	/* Negative comp_channel means "no completion channel". */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * RESIZE_CQ: ask the driver to resize an existing CQ; returns the new
 * actual cqe count to userspace.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;
	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* NOTE(review): only sizeof resp.cqe bytes are copied back, not the
	 * whole resp — presumably a deliberate ABI quirk (old userspace
	 * expects just the cqe field); confirm before "fixing". */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

/*
 * POLL_CQ: poll up to cmd.ne completions and marshal each ib_wc into
 * the fixed-layout ib_uverbs_wc response array.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp *resp;
	struct ib_cq                  *cq;
	struct ib_wc                  *wc;
	int                            ret = 0;
	int                            i;
	int                            rsize;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* NOTE(review): cmd.ne comes straight from userspace; both this
	 * multiplication and the rsize computation below can overflow,
	 * and a huge ne forces a huge kmalloc.  Consider bounding ne. */
	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
	resp = kmalloc(rsize, GFP_KERNEL);
	if (!resp) {
		ret = -ENOMEM;
		goto out_wc;
	}

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq) {
		ret = -EINVAL;
		goto out;
	}

	resp->count = ib_poll_cq(cq, cmd.ne, wc);

	put_cq_read(cq);

	for (i = 0; i < resp->count; i++) {
		resp->wc[i].wr_id 	   = wc[i].wr_id;
		resp->wc[i].status 	   = wc[i].status;
		resp->wc[i].opcode 	   = wc[i].opcode;
		resp->wc[i].vendor_err 	   = wc[i].vendor_err;
		resp->wc[i].byte_len 	   = wc[i].byte_len;
		resp->wc[i].ex.imm_data    = (__u32 __force) wc[i].ex.imm_data;
		resp->wc[i].qp_num 	   = wc[i].qp->qp_num;
		resp->wc[i].src_qp 	   = wc[i].src_qp;
		resp->wc[i].wc_flags 	   = wc[i].wc_flags;
		resp->wc[i].pkey_index 	   = wc[i].pkey_index;
		resp->wc[i].slid 	   = wc[i].slid;
		resp->wc[i].sl 		   = wc[i].sl;
		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
		resp->wc[i].port_num 	   = wc[i].port_num;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
		ret = -EFAULT;

out:
	kfree(resp);

out_wc:
	kfree(wc);
	return ret ? ret : in_len;
}

/*
 * REQ_NOTIFY_CQ: arm the CQ for the next (optionally solicited-only)
 * completion event.  The return of ib_req_notify_cq() is deliberately
 * not propagated; any failure surfaces via the event channel instead.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * DESTROY_CQ: tear down a CQ and report how many comp/async events
 * were delivered on it (so userspace can drain its event queues).
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	/* cq_context was set to the comp channel (or NULL) in create_cq. */
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any events still queued on the channel for this CQ. */
	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if
(copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * CREATE_QP: create a queue pair.  All referenced objects (PD, send
 * CQ, recv CQ, optional SRQ) are read-locked for the duration of the
 * driver call; the recv CQ lookup uses nested locking because both CQ
 * rwsems may be held at once.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
	/* Same handle may be used for both CQs; don't double-lock it.
	 * Otherwise take the recv CQ nested (two cq_lock_key rwsems). */
	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);

	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->device     	  = pd->device;
	qp->pd         	  = pd;
	qp->send_cq    	  = attr.send_cq;
	qp->recv_cq    	  = attr.recv_cq;
	qp->srq	       	  = attr.srq;
	qp->uobject       = &obj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type	  = attr.qp_type;
	/* The QP pins each referenced object until it is destroyed. */
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);
	put_cq_read(scq);
	if (rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	/* Lookups may have partially succeeded; put whatever we hold. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * QUERY_QP: query current QP attributes and marshal them (including
 * primary and alternate path AH attributes) back to userspace.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	/* kfree(NULL) is fine, so a single cleanup label handles both. */
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
resp.port_num = attr->port_num; 1224 resp.timeout = attr->timeout; 1225 resp.retry_cnt = attr->retry_cnt; 1226 resp.rnr_retry = attr->rnr_retry; 1227 resp.alt_port_num = attr->alt_port_num; 1228 resp.alt_timeout = attr->alt_timeout; 1229 1230 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 1231 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 1232 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 1233 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 1234 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 1235 resp.dest.dlid = attr->ah_attr.dlid; 1236 resp.dest.sl = attr->ah_attr.sl; 1237 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 1238 resp.dest.static_rate = attr->ah_attr.static_rate; 1239 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 1240 resp.dest.port_num = attr->ah_attr.port_num; 1241 1242 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 1243 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 1244 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 1245 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 1246 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 1247 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 1248 resp.alt_dest.sl = attr->alt_ah_attr.sl; 1249 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 1250 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 1251 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 1252 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 1253 1254 resp.max_send_wr = init_attr->cap.max_send_wr; 1255 resp.max_recv_wr = init_attr->cap.max_recv_wr; 1256 resp.max_send_sge = init_attr->cap.max_send_sge; 1257 resp.max_recv_sge = init_attr->cap.max_recv_sge; 1258 resp.max_inline_data = init_attr->cap.max_inline_data; 1259 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1260 1261 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1262 &resp, sizeof resp)) 1263 
ret = -EFAULT; 1264 1265 out: 1266 kfree(attr); 1267 kfree(init_attr); 1268 1269 return ret ? ret : in_len; 1270 } 1271 1272 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 1273 const char __user *buf, int in_len, 1274 int out_len) 1275 { 1276 struct ib_uverbs_modify_qp cmd; 1277 struct ib_udata udata; 1278 struct ib_qp *qp; 1279 struct ib_qp_attr *attr; 1280 int ret; 1281 1282 if (copy_from_user(&cmd, buf, sizeof cmd)) 1283 return -EFAULT; 1284 1285 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 1286 out_len); 1287 1288 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1289 if (!attr) 1290 return -ENOMEM; 1291 1292 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1293 if (!qp) { 1294 ret = -EINVAL; 1295 goto out; 1296 } 1297 1298 attr->qp_state = cmd.qp_state; 1299 attr->cur_qp_state = cmd.cur_qp_state; 1300 attr->path_mtu = cmd.path_mtu; 1301 attr->path_mig_state = cmd.path_mig_state; 1302 attr->qkey = cmd.qkey; 1303 attr->rq_psn = cmd.rq_psn; 1304 attr->sq_psn = cmd.sq_psn; 1305 attr->dest_qp_num = cmd.dest_qp_num; 1306 attr->qp_access_flags = cmd.qp_access_flags; 1307 attr->pkey_index = cmd.pkey_index; 1308 attr->alt_pkey_index = cmd.alt_pkey_index; 1309 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 1310 attr->max_rd_atomic = cmd.max_rd_atomic; 1311 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 1312 attr->min_rnr_timer = cmd.min_rnr_timer; 1313 attr->port_num = cmd.port_num; 1314 attr->timeout = cmd.timeout; 1315 attr->retry_cnt = cmd.retry_cnt; 1316 attr->rnr_retry = cmd.rnr_retry; 1317 attr->alt_port_num = cmd.alt_port_num; 1318 attr->alt_timeout = cmd.alt_timeout; 1319 1320 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 1321 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 1322 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 1323 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 1324 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 1325 attr->ah_attr.dlid = cmd.dest.dlid; 1326 attr->ah_attr.sl = 
cmd.dest.sl; 1327 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 1328 attr->ah_attr.static_rate = cmd.dest.static_rate; 1329 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 1330 attr->ah_attr.port_num = cmd.dest.port_num; 1331 1332 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 1333 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1334 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1335 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1336 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1337 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1338 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1339 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1340 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1341 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; 1342 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1343 1344 ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); 1345 1346 put_qp_read(qp); 1347 1348 if (ret) 1349 goto out; 1350 1351 ret = in_len; 1352 1353 out: 1354 kfree(attr); 1355 1356 return ret; 1357 } 1358 1359 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1360 const char __user *buf, int in_len, 1361 int out_len) 1362 { 1363 struct ib_uverbs_destroy_qp cmd; 1364 struct ib_uverbs_destroy_qp_resp resp; 1365 struct ib_uobject *uobj; 1366 struct ib_qp *qp; 1367 struct ib_uqp_object *obj; 1368 int ret = -EINVAL; 1369 1370 if (copy_from_user(&cmd, buf, sizeof cmd)) 1371 return -EFAULT; 1372 1373 memset(&resp, 0, sizeof resp); 1374 1375 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 1376 if (!uobj) 1377 return -EINVAL; 1378 qp = uobj->object; 1379 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 1380 1381 if (!list_empty(&obj->mcast_list)) { 1382 put_uobj_write(uobj); 1383 return -EBUSY; 1384 } 1385 1386 ret = ib_destroy_qp(qp); 1387 if (!ret) 1388 uobj->live = 0; 1389 1390 
put_uobj_write(uobj); 1391 1392 if (ret) 1393 return ret; 1394 1395 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 1396 1397 mutex_lock(&file->mutex); 1398 list_del(&uobj->list); 1399 mutex_unlock(&file->mutex); 1400 1401 ib_uverbs_release_uevent(file, &obj->uevent); 1402 1403 resp.events_reported = obj->uevent.events_reported; 1404 1405 put_uobj(uobj); 1406 1407 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1408 &resp, sizeof resp)) 1409 return -EFAULT; 1410 1411 return in_len; 1412 } 1413 1414 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 1415 const char __user *buf, int in_len, 1416 int out_len) 1417 { 1418 struct ib_uverbs_post_send cmd; 1419 struct ib_uverbs_post_send_resp resp; 1420 struct ib_uverbs_send_wr *user_wr; 1421 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 1422 struct ib_qp *qp; 1423 int i, sg_ind; 1424 int is_ud; 1425 ssize_t ret = -EINVAL; 1426 1427 if (copy_from_user(&cmd, buf, sizeof cmd)) 1428 return -EFAULT; 1429 1430 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 1431 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 1432 return -EINVAL; 1433 1434 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 1435 return -EINVAL; 1436 1437 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 1438 if (!user_wr) 1439 return -ENOMEM; 1440 1441 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1442 if (!qp) 1443 goto out; 1444 1445 is_ud = qp->qp_type == IB_QPT_UD; 1446 sg_ind = 0; 1447 last = NULL; 1448 for (i = 0; i < cmd.wr_count; ++i) { 1449 if (copy_from_user(user_wr, 1450 buf + sizeof cmd + i * cmd.wqe_size, 1451 cmd.wqe_size)) { 1452 ret = -EFAULT; 1453 goto out_put; 1454 } 1455 1456 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 1457 ret = -EINVAL; 1458 goto out_put; 1459 } 1460 1461 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1462 user_wr->num_sge * sizeof (struct ib_sge), 1463 GFP_KERNEL); 1464 if (!next) { 1465 ret = -ENOMEM; 1466 goto out_put; 1467 } 1468 1469 if (!last) 1470 wr = next; 1471 else 
1472 last->next = next; 1473 last = next; 1474 1475 next->next = NULL; 1476 next->wr_id = user_wr->wr_id; 1477 next->num_sge = user_wr->num_sge; 1478 next->opcode = user_wr->opcode; 1479 next->send_flags = user_wr->send_flags; 1480 1481 if (is_ud) { 1482 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 1483 file->ucontext); 1484 if (!next->wr.ud.ah) { 1485 ret = -EINVAL; 1486 goto out_put; 1487 } 1488 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; 1489 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1490 } else { 1491 switch (next->opcode) { 1492 case IB_WR_RDMA_WRITE_WITH_IMM: 1493 next->ex.imm_data = 1494 (__be32 __force) user_wr->ex.imm_data; 1495 case IB_WR_RDMA_WRITE: 1496 case IB_WR_RDMA_READ: 1497 next->wr.rdma.remote_addr = 1498 user_wr->wr.rdma.remote_addr; 1499 next->wr.rdma.rkey = 1500 user_wr->wr.rdma.rkey; 1501 break; 1502 case IB_WR_SEND_WITH_IMM: 1503 next->ex.imm_data = 1504 (__be32 __force) user_wr->ex.imm_data; 1505 break; 1506 case IB_WR_SEND_WITH_INV: 1507 next->ex.invalidate_rkey = 1508 user_wr->ex.invalidate_rkey; 1509 break; 1510 case IB_WR_ATOMIC_CMP_AND_SWP: 1511 case IB_WR_ATOMIC_FETCH_AND_ADD: 1512 next->wr.atomic.remote_addr = 1513 user_wr->wr.atomic.remote_addr; 1514 next->wr.atomic.compare_add = 1515 user_wr->wr.atomic.compare_add; 1516 next->wr.atomic.swap = user_wr->wr.atomic.swap; 1517 next->wr.atomic.rkey = user_wr->wr.atomic.rkey; 1518 break; 1519 default: 1520 break; 1521 } 1522 } 1523 1524 if (next->num_sge) { 1525 next->sg_list = (void *) next + 1526 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1527 if (copy_from_user(next->sg_list, 1528 buf + sizeof cmd + 1529 cmd.wr_count * cmd.wqe_size + 1530 sg_ind * sizeof (struct ib_sge), 1531 next->num_sge * sizeof (struct ib_sge))) { 1532 ret = -EFAULT; 1533 goto out_put; 1534 } 1535 sg_ind += next->num_sge; 1536 } else 1537 next->sg_list = NULL; 1538 } 1539 1540 resp.bad_wr = 0; 1541 ret = qp->device->post_send(qp, wr, &bad_wr); 1542 if (ret) 1543 for (next = wr; next; next = 
next->next) { 1544 ++resp.bad_wr; 1545 if (next == bad_wr) 1546 break; 1547 } 1548 1549 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1550 &resp, sizeof resp)) 1551 ret = -EFAULT; 1552 1553 out_put: 1554 put_qp_read(qp); 1555 1556 while (wr) { 1557 if (is_ud && wr->wr.ud.ah) 1558 put_ah_read(wr->wr.ud.ah); 1559 next = wr->next; 1560 kfree(wr); 1561 wr = next; 1562 } 1563 1564 out: 1565 kfree(user_wr); 1566 1567 return ret ? ret : in_len; 1568 } 1569 1570 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 1571 int in_len, 1572 u32 wr_count, 1573 u32 sge_count, 1574 u32 wqe_size) 1575 { 1576 struct ib_uverbs_recv_wr *user_wr; 1577 struct ib_recv_wr *wr = NULL, *last, *next; 1578 int sg_ind; 1579 int i; 1580 int ret; 1581 1582 if (in_len < wqe_size * wr_count + 1583 sge_count * sizeof (struct ib_uverbs_sge)) 1584 return ERR_PTR(-EINVAL); 1585 1586 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 1587 return ERR_PTR(-EINVAL); 1588 1589 user_wr = kmalloc(wqe_size, GFP_KERNEL); 1590 if (!user_wr) 1591 return ERR_PTR(-ENOMEM); 1592 1593 sg_ind = 0; 1594 last = NULL; 1595 for (i = 0; i < wr_count; ++i) { 1596 if (copy_from_user(user_wr, buf + i * wqe_size, 1597 wqe_size)) { 1598 ret = -EFAULT; 1599 goto err; 1600 } 1601 1602 if (user_wr->num_sge + sg_ind > sge_count) { 1603 ret = -EINVAL; 1604 goto err; 1605 } 1606 1607 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1608 user_wr->num_sge * sizeof (struct ib_sge), 1609 GFP_KERNEL); 1610 if (!next) { 1611 ret = -ENOMEM; 1612 goto err; 1613 } 1614 1615 if (!last) 1616 wr = next; 1617 else 1618 last->next = next; 1619 last = next; 1620 1621 next->next = NULL; 1622 next->wr_id = user_wr->wr_id; 1623 next->num_sge = user_wr->num_sge; 1624 1625 if (next->num_sge) { 1626 next->sg_list = (void *) next + 1627 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1628 if (copy_from_user(next->sg_list, 1629 buf + wr_count * wqe_size + 1630 sg_ind * sizeof (struct ib_sge), 1631 
next->num_sge * sizeof (struct ib_sge))) { 1632 ret = -EFAULT; 1633 goto err; 1634 } 1635 sg_ind += next->num_sge; 1636 } else 1637 next->sg_list = NULL; 1638 } 1639 1640 kfree(user_wr); 1641 return wr; 1642 1643 err: 1644 kfree(user_wr); 1645 1646 while (wr) { 1647 next = wr->next; 1648 kfree(wr); 1649 wr = next; 1650 } 1651 1652 return ERR_PTR(ret); 1653 } 1654 1655 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 1656 const char __user *buf, int in_len, 1657 int out_len) 1658 { 1659 struct ib_uverbs_post_recv cmd; 1660 struct ib_uverbs_post_recv_resp resp; 1661 struct ib_recv_wr *wr, *next, *bad_wr; 1662 struct ib_qp *qp; 1663 ssize_t ret = -EINVAL; 1664 1665 if (copy_from_user(&cmd, buf, sizeof cmd)) 1666 return -EFAULT; 1667 1668 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1669 in_len - sizeof cmd, cmd.wr_count, 1670 cmd.sge_count, cmd.wqe_size); 1671 if (IS_ERR(wr)) 1672 return PTR_ERR(wr); 1673 1674 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1675 if (!qp) 1676 goto out; 1677 1678 resp.bad_wr = 0; 1679 ret = qp->device->post_recv(qp, wr, &bad_wr); 1680 1681 put_qp_read(qp); 1682 1683 if (ret) 1684 for (next = wr; next; next = next->next) { 1685 ++resp.bad_wr; 1686 if (next == bad_wr) 1687 break; 1688 } 1689 1690 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1691 &resp, sizeof resp)) 1692 ret = -EFAULT; 1693 1694 out: 1695 while (wr) { 1696 next = wr->next; 1697 kfree(wr); 1698 wr = next; 1699 } 1700 1701 return ret ? 
ret : in_len; 1702 } 1703 1704 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 1705 const char __user *buf, int in_len, 1706 int out_len) 1707 { 1708 struct ib_uverbs_post_srq_recv cmd; 1709 struct ib_uverbs_post_srq_recv_resp resp; 1710 struct ib_recv_wr *wr, *next, *bad_wr; 1711 struct ib_srq *srq; 1712 ssize_t ret = -EINVAL; 1713 1714 if (copy_from_user(&cmd, buf, sizeof cmd)) 1715 return -EFAULT; 1716 1717 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1718 in_len - sizeof cmd, cmd.wr_count, 1719 cmd.sge_count, cmd.wqe_size); 1720 if (IS_ERR(wr)) 1721 return PTR_ERR(wr); 1722 1723 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 1724 if (!srq) 1725 goto out; 1726 1727 resp.bad_wr = 0; 1728 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 1729 1730 put_srq_read(srq); 1731 1732 if (ret) 1733 for (next = wr; next; next = next->next) { 1734 ++resp.bad_wr; 1735 if (next == bad_wr) 1736 break; 1737 } 1738 1739 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1740 &resp, sizeof resp)) 1741 ret = -EFAULT; 1742 1743 out: 1744 while (wr) { 1745 next = wr->next; 1746 kfree(wr); 1747 wr = next; 1748 } 1749 1750 return ret ? 
ret : in_len; 1751 } 1752 1753 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 1754 const char __user *buf, int in_len, 1755 int out_len) 1756 { 1757 struct ib_uverbs_create_ah cmd; 1758 struct ib_uverbs_create_ah_resp resp; 1759 struct ib_uobject *uobj; 1760 struct ib_pd *pd; 1761 struct ib_ah *ah; 1762 struct ib_ah_attr attr; 1763 int ret; 1764 1765 if (out_len < sizeof resp) 1766 return -ENOSPC; 1767 1768 if (copy_from_user(&cmd, buf, sizeof cmd)) 1769 return -EFAULT; 1770 1771 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 1772 if (!uobj) 1773 return -ENOMEM; 1774 1775 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key); 1776 down_write(&uobj->mutex); 1777 1778 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1779 if (!pd) { 1780 ret = -EINVAL; 1781 goto err; 1782 } 1783 1784 attr.dlid = cmd.attr.dlid; 1785 attr.sl = cmd.attr.sl; 1786 attr.src_path_bits = cmd.attr.src_path_bits; 1787 attr.static_rate = cmd.attr.static_rate; 1788 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0; 1789 attr.port_num = cmd.attr.port_num; 1790 attr.grh.flow_label = cmd.attr.grh.flow_label; 1791 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 1792 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 1793 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 1794 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 1795 1796 ah = ib_create_ah(pd, &attr); 1797 if (IS_ERR(ah)) { 1798 ret = PTR_ERR(ah); 1799 goto err_put; 1800 } 1801 1802 ah->uobject = uobj; 1803 uobj->object = ah; 1804 1805 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj); 1806 if (ret) 1807 goto err_destroy; 1808 1809 resp.ah_handle = uobj->id; 1810 1811 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1812 &resp, sizeof resp)) { 1813 ret = -EFAULT; 1814 goto err_copy; 1815 } 1816 1817 put_pd_read(pd); 1818 1819 mutex_lock(&file->mutex); 1820 list_add_tail(&uobj->list, &file->ucontext->ah_list); 1821 mutex_unlock(&file->mutex); 1822 1823 uobj->live = 1; 1824 1825 up_write(&uobj->mutex); 1826 1827 return 
in_len; 1828 1829 err_copy: 1830 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 1831 1832 err_destroy: 1833 ib_destroy_ah(ah); 1834 1835 err_put: 1836 put_pd_read(pd); 1837 1838 err: 1839 put_uobj_write(uobj); 1840 return ret; 1841 } 1842 1843 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 1844 const char __user *buf, int in_len, int out_len) 1845 { 1846 struct ib_uverbs_destroy_ah cmd; 1847 struct ib_ah *ah; 1848 struct ib_uobject *uobj; 1849 int ret; 1850 1851 if (copy_from_user(&cmd, buf, sizeof cmd)) 1852 return -EFAULT; 1853 1854 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext); 1855 if (!uobj) 1856 return -EINVAL; 1857 ah = uobj->object; 1858 1859 ret = ib_destroy_ah(ah); 1860 if (!ret) 1861 uobj->live = 0; 1862 1863 put_uobj_write(uobj); 1864 1865 if (ret) 1866 return ret; 1867 1868 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 1869 1870 mutex_lock(&file->mutex); 1871 list_del(&uobj->list); 1872 mutex_unlock(&file->mutex); 1873 1874 put_uobj(uobj); 1875 1876 return in_len; 1877 } 1878 1879 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 1880 const char __user *buf, int in_len, 1881 int out_len) 1882 { 1883 struct ib_uverbs_attach_mcast cmd; 1884 struct ib_qp *qp; 1885 struct ib_uqp_object *obj; 1886 struct ib_uverbs_mcast_entry *mcast; 1887 int ret; 1888 1889 if (copy_from_user(&cmd, buf, sizeof cmd)) 1890 return -EFAULT; 1891 1892 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1893 if (!qp) 1894 return -EINVAL; 1895 1896 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1897 1898 list_for_each_entry(mcast, &obj->mcast_list, list) 1899 if (cmd.mlid == mcast->lid && 1900 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 1901 ret = 0; 1902 goto out_put; 1903 } 1904 1905 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 1906 if (!mcast) { 1907 ret = -ENOMEM; 1908 goto out_put; 1909 } 1910 1911 mcast->lid = cmd.mlid; 1912 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 1913 1914 ret = 
ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 1915 if (!ret) 1916 list_add_tail(&mcast->list, &obj->mcast_list); 1917 else 1918 kfree(mcast); 1919 1920 out_put: 1921 put_qp_read(qp); 1922 1923 return ret ? ret : in_len; 1924 } 1925 1926 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 1927 const char __user *buf, int in_len, 1928 int out_len) 1929 { 1930 struct ib_uverbs_detach_mcast cmd; 1931 struct ib_uqp_object *obj; 1932 struct ib_qp *qp; 1933 struct ib_uverbs_mcast_entry *mcast; 1934 int ret = -EINVAL; 1935 1936 if (copy_from_user(&cmd, buf, sizeof cmd)) 1937 return -EFAULT; 1938 1939 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1940 if (!qp) 1941 return -EINVAL; 1942 1943 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 1944 if (ret) 1945 goto out_put; 1946 1947 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1948 1949 list_for_each_entry(mcast, &obj->mcast_list, list) 1950 if (cmd.mlid == mcast->lid && 1951 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 1952 list_del(&mcast->list); 1953 kfree(mcast); 1954 break; 1955 } 1956 1957 out_put: 1958 put_qp_read(qp); 1959 1960 return ret ? 
ret : in_len; 1961 } 1962 1963 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 1964 const char __user *buf, int in_len, 1965 int out_len) 1966 { 1967 struct ib_uverbs_create_srq cmd; 1968 struct ib_uverbs_create_srq_resp resp; 1969 struct ib_udata udata; 1970 struct ib_uevent_object *obj; 1971 struct ib_pd *pd; 1972 struct ib_srq *srq; 1973 struct ib_srq_init_attr attr; 1974 int ret; 1975 1976 if (out_len < sizeof resp) 1977 return -ENOSPC; 1978 1979 if (copy_from_user(&cmd, buf, sizeof cmd)) 1980 return -EFAULT; 1981 1982 INIT_UDATA(&udata, buf + sizeof cmd, 1983 (unsigned long) cmd.response + sizeof resp, 1984 in_len - sizeof cmd, out_len - sizeof resp); 1985 1986 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1987 if (!obj) 1988 return -ENOMEM; 1989 1990 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); 1991 down_write(&obj->uobject.mutex); 1992 1993 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1994 if (!pd) { 1995 ret = -EINVAL; 1996 goto err; 1997 } 1998 1999 attr.event_handler = ib_uverbs_srq_event_handler; 2000 attr.srq_context = file; 2001 attr.attr.max_wr = cmd.max_wr; 2002 attr.attr.max_sge = cmd.max_sge; 2003 attr.attr.srq_limit = cmd.srq_limit; 2004 2005 obj->events_reported = 0; 2006 INIT_LIST_HEAD(&obj->event_list); 2007 2008 srq = pd->device->create_srq(pd, &attr, &udata); 2009 if (IS_ERR(srq)) { 2010 ret = PTR_ERR(srq); 2011 goto err_put; 2012 } 2013 2014 srq->device = pd->device; 2015 srq->pd = pd; 2016 srq->uobject = &obj->uobject; 2017 srq->event_handler = attr.event_handler; 2018 srq->srq_context = attr.srq_context; 2019 atomic_inc(&pd->usecnt); 2020 atomic_set(&srq->usecnt, 0); 2021 2022 obj->uobject.object = srq; 2023 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject); 2024 if (ret) 2025 goto err_destroy; 2026 2027 memset(&resp, 0, sizeof resp); 2028 resp.srq_handle = obj->uobject.id; 2029 resp.max_wr = attr.attr.max_wr; 2030 resp.max_sge = attr.attr.max_sge; 2031 2032 if (copy_to_user((void __user *) 
(unsigned long) cmd.response, 2033 &resp, sizeof resp)) { 2034 ret = -EFAULT; 2035 goto err_copy; 2036 } 2037 2038 put_pd_read(pd); 2039 2040 mutex_lock(&file->mutex); 2041 list_add_tail(&obj->uobject.list, &file->ucontext->srq_list); 2042 mutex_unlock(&file->mutex); 2043 2044 obj->uobject.live = 1; 2045 2046 up_write(&obj->uobject.mutex); 2047 2048 return in_len; 2049 2050 err_copy: 2051 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject); 2052 2053 err_destroy: 2054 ib_destroy_srq(srq); 2055 2056 err_put: 2057 put_pd_read(pd); 2058 2059 err: 2060 put_uobj_write(&obj->uobject); 2061 return ret; 2062 } 2063 2064 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 2065 const char __user *buf, int in_len, 2066 int out_len) 2067 { 2068 struct ib_uverbs_modify_srq cmd; 2069 struct ib_udata udata; 2070 struct ib_srq *srq; 2071 struct ib_srq_attr attr; 2072 int ret; 2073 2074 if (copy_from_user(&cmd, buf, sizeof cmd)) 2075 return -EFAULT; 2076 2077 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2078 out_len); 2079 2080 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2081 if (!srq) 2082 return -EINVAL; 2083 2084 attr.max_wr = cmd.max_wr; 2085 attr.srq_limit = cmd.srq_limit; 2086 2087 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 2088 2089 put_srq_read(srq); 2090 2091 return ret ? 
ret : in_len; 2092 } 2093 2094 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 2095 const char __user *buf, 2096 int in_len, int out_len) 2097 { 2098 struct ib_uverbs_query_srq cmd; 2099 struct ib_uverbs_query_srq_resp resp; 2100 struct ib_srq_attr attr; 2101 struct ib_srq *srq; 2102 int ret; 2103 2104 if (out_len < sizeof resp) 2105 return -ENOSPC; 2106 2107 if (copy_from_user(&cmd, buf, sizeof cmd)) 2108 return -EFAULT; 2109 2110 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2111 if (!srq) 2112 return -EINVAL; 2113 2114 ret = ib_query_srq(srq, &attr); 2115 2116 put_srq_read(srq); 2117 2118 if (ret) 2119 return ret; 2120 2121 memset(&resp, 0, sizeof resp); 2122 2123 resp.max_wr = attr.max_wr; 2124 resp.max_sge = attr.max_sge; 2125 resp.srq_limit = attr.srq_limit; 2126 2127 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2128 &resp, sizeof resp)) 2129 return -EFAULT; 2130 2131 return in_len; 2132 } 2133 2134 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 2135 const char __user *buf, int in_len, 2136 int out_len) 2137 { 2138 struct ib_uverbs_destroy_srq cmd; 2139 struct ib_uverbs_destroy_srq_resp resp; 2140 struct ib_uobject *uobj; 2141 struct ib_srq *srq; 2142 struct ib_uevent_object *obj; 2143 int ret = -EINVAL; 2144 2145 if (copy_from_user(&cmd, buf, sizeof cmd)) 2146 return -EFAULT; 2147 2148 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 2149 if (!uobj) 2150 return -EINVAL; 2151 srq = uobj->object; 2152 obj = container_of(uobj, struct ib_uevent_object, uobject); 2153 2154 ret = ib_destroy_srq(srq); 2155 if (!ret) 2156 uobj->live = 0; 2157 2158 put_uobj_write(uobj); 2159 2160 if (ret) 2161 return ret; 2162 2163 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 2164 2165 mutex_lock(&file->mutex); 2166 list_del(&uobj->list); 2167 mutex_unlock(&file->mutex); 2168 2169 ib_uverbs_release_uevent(file, obj); 2170 2171 memset(&resp, 0, sizeof resp); 2172 resp.events_reported = obj->events_reported; 2173 2174 
put_uobj(uobj); 2175 2176 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2177 &resp, sizeof resp)) 2178 ret = -EFAULT; 2179 2180 return ret ? ret : in_len; 2181 } 2182