1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
34 */ 35 36 #include <linux/file.h> 37 #include <linux/fs.h> 38 39 #include <asm/uaccess.h> 40 41 #include "uverbs.h" 42 43 static struct lock_class_key pd_lock_key; 44 static struct lock_class_key mr_lock_key; 45 static struct lock_class_key cq_lock_key; 46 static struct lock_class_key qp_lock_key; 47 static struct lock_class_key ah_lock_key; 48 static struct lock_class_key srq_lock_key; 49 50 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 51 do { \ 52 (udata)->inbuf = (void __user *) (ibuf); \ 53 (udata)->outbuf = (void __user *) (obuf); \ 54 (udata)->inlen = (ilen); \ 55 (udata)->outlen = (olen); \ 56 } while (0) 57 58 /* 59 * The ib_uobject locking scheme is as follows: 60 * 61 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 62 * needs to be held during all idr operations. When an object is 63 * looked up, a reference must be taken on the object's kref before 64 * dropping this lock. 65 * 66 * - Each object also has an rwsem. This rwsem must be held for 67 * reading while an operation that uses the object is performed. 68 * For example, while registering an MR, the associated PD's 69 * uobject.mutex must be held for reading. The rwsem must be held 70 * for writing while initializing or destroying an object. 71 * 72 * - In addition, each object has a "live" flag. If this flag is not 73 * set, then lookups of the object will fail even if it is found in 74 * the idr. This handles a reader that blocks and does not acquire 75 * the rwsem until after the object is destroyed. The destroy 76 * operation will set the live flag to 0 and then drop the rwsem; 77 * this will allow the reader to acquire the rwsem, see that the 78 * live flag is 0, and then drop the rwsem and its reference to 79 * object. The underlying storage will not be freed until the last 80 * reference to the object is dropped. 
81 */ 82 83 static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 84 struct ib_ucontext *context, struct lock_class_key *key) 85 { 86 uobj->user_handle = user_handle; 87 uobj->context = context; 88 kref_init(&uobj->ref); 89 init_rwsem(&uobj->mutex); 90 lockdep_set_class(&uobj->mutex, key); 91 uobj->live = 0; 92 } 93 94 static void release_uobj(struct kref *kref) 95 { 96 kfree(container_of(kref, struct ib_uobject, ref)); 97 } 98 99 static void put_uobj(struct ib_uobject *uobj) 100 { 101 kref_put(&uobj->ref, release_uobj); 102 } 103 104 static void put_uobj_read(struct ib_uobject *uobj) 105 { 106 up_read(&uobj->mutex); 107 put_uobj(uobj); 108 } 109 110 static void put_uobj_write(struct ib_uobject *uobj) 111 { 112 up_write(&uobj->mutex); 113 put_uobj(uobj); 114 } 115 116 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) 117 { 118 int ret; 119 120 retry: 121 if (!idr_pre_get(idr, GFP_KERNEL)) 122 return -ENOMEM; 123 124 spin_lock(&ib_uverbs_idr_lock); 125 ret = idr_get_new(idr, uobj, &uobj->id); 126 spin_unlock(&ib_uverbs_idr_lock); 127 128 if (ret == -EAGAIN) 129 goto retry; 130 131 return ret; 132 } 133 134 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) 135 { 136 spin_lock(&ib_uverbs_idr_lock); 137 idr_remove(idr, uobj->id); 138 spin_unlock(&ib_uverbs_idr_lock); 139 } 140 141 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, 142 struct ib_ucontext *context) 143 { 144 struct ib_uobject *uobj; 145 146 spin_lock(&ib_uverbs_idr_lock); 147 uobj = idr_find(idr, id); 148 if (uobj) { 149 if (uobj->context == context) 150 kref_get(&uobj->ref); 151 else 152 uobj = NULL; 153 } 154 spin_unlock(&ib_uverbs_idr_lock); 155 156 return uobj; 157 } 158 159 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, 160 struct ib_ucontext *context, int nested) 161 { 162 struct ib_uobject *uobj; 163 164 uobj = __idr_get_uobj(idr, id, context); 165 if (!uobj) 166 return NULL; 167 168 if (nested) 169 
down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); 170 else 171 down_read(&uobj->mutex); 172 if (!uobj->live) { 173 put_uobj_read(uobj); 174 return NULL; 175 } 176 177 return uobj; 178 } 179 180 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, 181 struct ib_ucontext *context) 182 { 183 struct ib_uobject *uobj; 184 185 uobj = __idr_get_uobj(idr, id, context); 186 if (!uobj) 187 return NULL; 188 189 down_write(&uobj->mutex); 190 if (!uobj->live) { 191 put_uobj_write(uobj); 192 return NULL; 193 } 194 195 return uobj; 196 } 197 198 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, 199 int nested) 200 { 201 struct ib_uobject *uobj; 202 203 uobj = idr_read_uobj(idr, id, context, nested); 204 return uobj ? uobj->object : NULL; 205 } 206 207 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) 208 { 209 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); 210 } 211 212 static void put_pd_read(struct ib_pd *pd) 213 { 214 put_uobj_read(pd->uobject); 215 } 216 217 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) 218 { 219 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); 220 } 221 222 static void put_cq_read(struct ib_cq *cq) 223 { 224 put_uobj_read(cq->uobject); 225 } 226 227 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) 228 { 229 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); 230 } 231 232 static void put_ah_read(struct ib_ah *ah) 233 { 234 put_uobj_read(ah->uobject); 235 } 236 237 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) 238 { 239 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); 240 } 241 242 static void put_qp_read(struct ib_qp *qp) 243 { 244 put_uobj_read(qp->uobject); 245 } 246 247 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) 248 { 249 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); 
250 } 251 252 static void put_srq_read(struct ib_srq *srq) 253 { 254 put_uobj_read(srq->uobject); 255 } 256 257 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 258 const char __user *buf, 259 int in_len, int out_len) 260 { 261 struct ib_uverbs_get_context cmd; 262 struct ib_uverbs_get_context_resp resp; 263 struct ib_udata udata; 264 struct ib_device *ibdev = file->device->ib_dev; 265 struct ib_ucontext *ucontext; 266 struct file *filp; 267 int ret; 268 269 if (out_len < sizeof resp) 270 return -ENOSPC; 271 272 if (copy_from_user(&cmd, buf, sizeof cmd)) 273 return -EFAULT; 274 275 mutex_lock(&file->mutex); 276 277 if (file->ucontext) { 278 ret = -EINVAL; 279 goto err; 280 } 281 282 INIT_UDATA(&udata, buf + sizeof cmd, 283 (unsigned long) cmd.response + sizeof resp, 284 in_len - sizeof cmd, out_len - sizeof resp); 285 286 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 287 if (IS_ERR(ucontext)) { 288 ret = PTR_ERR(ucontext); 289 goto err; 290 } 291 292 ucontext->device = ibdev; 293 INIT_LIST_HEAD(&ucontext->pd_list); 294 INIT_LIST_HEAD(&ucontext->mr_list); 295 INIT_LIST_HEAD(&ucontext->mw_list); 296 INIT_LIST_HEAD(&ucontext->cq_list); 297 INIT_LIST_HEAD(&ucontext->qp_list); 298 INIT_LIST_HEAD(&ucontext->srq_list); 299 INIT_LIST_HEAD(&ucontext->ah_list); 300 ucontext->closing = 0; 301 302 resp.num_comp_vectors = file->device->num_comp_vectors; 303 304 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 305 if (IS_ERR(filp)) { 306 ret = PTR_ERR(filp); 307 goto err_free; 308 } 309 310 if (copy_to_user((void __user *) (unsigned long) cmd.response, 311 &resp, sizeof resp)) { 312 ret = -EFAULT; 313 goto err_file; 314 } 315 316 file->async_file = filp->private_data; 317 318 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 319 ib_uverbs_event_handler); 320 ret = ib_register_event_handler(&file->event_handler); 321 if (ret) 322 goto err_file; 323 324 kref_get(&file->async_file->ref); 325 kref_get(&file->ref); 326 file->ucontext = 
ucontext; 327 328 fd_install(resp.async_fd, filp); 329 330 mutex_unlock(&file->mutex); 331 332 return in_len; 333 334 err_file: 335 put_unused_fd(resp.async_fd); 336 fput(filp); 337 338 err_free: 339 ibdev->dealloc_ucontext(ucontext); 340 341 err: 342 mutex_unlock(&file->mutex); 343 return ret; 344 } 345 346 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 347 const char __user *buf, 348 int in_len, int out_len) 349 { 350 struct ib_uverbs_query_device cmd; 351 struct ib_uverbs_query_device_resp resp; 352 struct ib_device_attr attr; 353 int ret; 354 355 if (out_len < sizeof resp) 356 return -ENOSPC; 357 358 if (copy_from_user(&cmd, buf, sizeof cmd)) 359 return -EFAULT; 360 361 ret = ib_query_device(file->device->ib_dev, &attr); 362 if (ret) 363 return ret; 364 365 memset(&resp, 0, sizeof resp); 366 367 resp.fw_ver = attr.fw_ver; 368 resp.node_guid = file->device->ib_dev->node_guid; 369 resp.sys_image_guid = attr.sys_image_guid; 370 resp.max_mr_size = attr.max_mr_size; 371 resp.page_size_cap = attr.page_size_cap; 372 resp.vendor_id = attr.vendor_id; 373 resp.vendor_part_id = attr.vendor_part_id; 374 resp.hw_ver = attr.hw_ver; 375 resp.max_qp = attr.max_qp; 376 resp.max_qp_wr = attr.max_qp_wr; 377 resp.device_cap_flags = attr.device_cap_flags; 378 resp.max_sge = attr.max_sge; 379 resp.max_sge_rd = attr.max_sge_rd; 380 resp.max_cq = attr.max_cq; 381 resp.max_cqe = attr.max_cqe; 382 resp.max_mr = attr.max_mr; 383 resp.max_pd = attr.max_pd; 384 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 385 resp.max_ee_rd_atom = attr.max_ee_rd_atom; 386 resp.max_res_rd_atom = attr.max_res_rd_atom; 387 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 388 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 389 resp.atomic_cap = attr.atomic_cap; 390 resp.max_ee = attr.max_ee; 391 resp.max_rdd = attr.max_rdd; 392 resp.max_mw = attr.max_mw; 393 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 394 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 395 resp.max_mcast_grp = 
attr.max_mcast_grp; 396 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 397 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 398 resp.max_ah = attr.max_ah; 399 resp.max_fmr = attr.max_fmr; 400 resp.max_map_per_fmr = attr.max_map_per_fmr; 401 resp.max_srq = attr.max_srq; 402 resp.max_srq_wr = attr.max_srq_wr; 403 resp.max_srq_sge = attr.max_srq_sge; 404 resp.max_pkeys = attr.max_pkeys; 405 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 406 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 407 408 if (copy_to_user((void __user *) (unsigned long) cmd.response, 409 &resp, sizeof resp)) 410 return -EFAULT; 411 412 return in_len; 413 } 414 415 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 416 const char __user *buf, 417 int in_len, int out_len) 418 { 419 struct ib_uverbs_query_port cmd; 420 struct ib_uverbs_query_port_resp resp; 421 struct ib_port_attr attr; 422 int ret; 423 424 if (out_len < sizeof resp) 425 return -ENOSPC; 426 427 if (copy_from_user(&cmd, buf, sizeof cmd)) 428 return -EFAULT; 429 430 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 431 if (ret) 432 return ret; 433 434 memset(&resp, 0, sizeof resp); 435 436 resp.state = attr.state; 437 resp.max_mtu = attr.max_mtu; 438 resp.active_mtu = attr.active_mtu; 439 resp.gid_tbl_len = attr.gid_tbl_len; 440 resp.port_cap_flags = attr.port_cap_flags; 441 resp.max_msg_sz = attr.max_msg_sz; 442 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 443 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 444 resp.pkey_tbl_len = attr.pkey_tbl_len; 445 resp.lid = attr.lid; 446 resp.sm_lid = attr.sm_lid; 447 resp.lmc = attr.lmc; 448 resp.max_vl_num = attr.max_vl_num; 449 resp.sm_sl = attr.sm_sl; 450 resp.subnet_timeout = attr.subnet_timeout; 451 resp.init_type_reply = attr.init_type_reply; 452 resp.active_width = attr.active_width; 453 resp.active_speed = attr.active_speed; 454 resp.phys_state = attr.phys_state; 455 456 if (copy_to_user((void __user *) (unsigned long) cmd.response, 
457 &resp, sizeof resp)) 458 return -EFAULT; 459 460 return in_len; 461 } 462 463 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 464 const char __user *buf, 465 int in_len, int out_len) 466 { 467 struct ib_uverbs_alloc_pd cmd; 468 struct ib_uverbs_alloc_pd_resp resp; 469 struct ib_udata udata; 470 struct ib_uobject *uobj; 471 struct ib_pd *pd; 472 int ret; 473 474 if (out_len < sizeof resp) 475 return -ENOSPC; 476 477 if (copy_from_user(&cmd, buf, sizeof cmd)) 478 return -EFAULT; 479 480 INIT_UDATA(&udata, buf + sizeof cmd, 481 (unsigned long) cmd.response + sizeof resp, 482 in_len - sizeof cmd, out_len - sizeof resp); 483 484 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 485 if (!uobj) 486 return -ENOMEM; 487 488 init_uobj(uobj, 0, file->ucontext, &pd_lock_key); 489 down_write(&uobj->mutex); 490 491 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 492 file->ucontext, &udata); 493 if (IS_ERR(pd)) { 494 ret = PTR_ERR(pd); 495 goto err; 496 } 497 498 pd->device = file->device->ib_dev; 499 pd->uobject = uobj; 500 atomic_set(&pd->usecnt, 0); 501 502 uobj->object = pd; 503 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj); 504 if (ret) 505 goto err_idr; 506 507 memset(&resp, 0, sizeof resp); 508 resp.pd_handle = uobj->id; 509 510 if (copy_to_user((void __user *) (unsigned long) cmd.response, 511 &resp, sizeof resp)) { 512 ret = -EFAULT; 513 goto err_copy; 514 } 515 516 mutex_lock(&file->mutex); 517 list_add_tail(&uobj->list, &file->ucontext->pd_list); 518 mutex_unlock(&file->mutex); 519 520 uobj->live = 1; 521 522 up_write(&uobj->mutex); 523 524 return in_len; 525 526 err_copy: 527 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 528 529 err_idr: 530 ib_dealloc_pd(pd); 531 532 err: 533 put_uobj_write(uobj); 534 return ret; 535 } 536 537 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 538 const char __user *buf, 539 int in_len, int out_len) 540 { 541 struct ib_uverbs_dealloc_pd cmd; 542 struct ib_uobject *uobj; 543 int ret; 544 545 if (copy_from_user(&cmd, 
buf, sizeof cmd)) 546 return -EFAULT; 547 548 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext); 549 if (!uobj) 550 return -EINVAL; 551 552 ret = ib_dealloc_pd(uobj->object); 553 if (!ret) 554 uobj->live = 0; 555 556 put_uobj_write(uobj); 557 558 if (ret) 559 return ret; 560 561 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 562 563 mutex_lock(&file->mutex); 564 list_del(&uobj->list); 565 mutex_unlock(&file->mutex); 566 567 put_uobj(uobj); 568 569 return in_len; 570 } 571 572 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, 573 const char __user *buf, int in_len, 574 int out_len) 575 { 576 struct ib_uverbs_reg_mr cmd; 577 struct ib_uverbs_reg_mr_resp resp; 578 struct ib_udata udata; 579 struct ib_uobject *uobj; 580 struct ib_pd *pd; 581 struct ib_mr *mr; 582 int ret; 583 584 if (out_len < sizeof resp) 585 return -ENOSPC; 586 587 if (copy_from_user(&cmd, buf, sizeof cmd)) 588 return -EFAULT; 589 590 INIT_UDATA(&udata, buf + sizeof cmd, 591 (unsigned long) cmd.response + sizeof resp, 592 in_len - sizeof cmd, out_len - sizeof resp); 593 594 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 595 return -EINVAL; 596 597 /* 598 * Local write permission is required if remote write or 599 * remote atomic permission is also requested. 
600 */ 601 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 602 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 603 return -EINVAL; 604 605 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 606 if (!uobj) 607 return -ENOMEM; 608 609 init_uobj(uobj, 0, file->ucontext, &mr_lock_key); 610 down_write(&uobj->mutex); 611 612 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 613 if (!pd) { 614 ret = -EINVAL; 615 goto err_free; 616 } 617 618 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, 619 cmd.access_flags, &udata); 620 if (IS_ERR(mr)) { 621 ret = PTR_ERR(mr); 622 goto err_put; 623 } 624 625 mr->device = pd->device; 626 mr->pd = pd; 627 mr->uobject = uobj; 628 atomic_inc(&pd->usecnt); 629 atomic_set(&mr->usecnt, 0); 630 631 uobj->object = mr; 632 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj); 633 if (ret) 634 goto err_unreg; 635 636 memset(&resp, 0, sizeof resp); 637 resp.lkey = mr->lkey; 638 resp.rkey = mr->rkey; 639 resp.mr_handle = uobj->id; 640 641 if (copy_to_user((void __user *) (unsigned long) cmd.response, 642 &resp, sizeof resp)) { 643 ret = -EFAULT; 644 goto err_copy; 645 } 646 647 put_pd_read(pd); 648 649 mutex_lock(&file->mutex); 650 list_add_tail(&uobj->list, &file->ucontext->mr_list); 651 mutex_unlock(&file->mutex); 652 653 uobj->live = 1; 654 655 up_write(&uobj->mutex); 656 657 return in_len; 658 659 err_copy: 660 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 661 662 err_unreg: 663 ib_dereg_mr(mr); 664 665 err_put: 666 put_pd_read(pd); 667 668 err_free: 669 put_uobj_write(uobj); 670 return ret; 671 } 672 673 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 674 const char __user *buf, int in_len, 675 int out_len) 676 { 677 struct ib_uverbs_dereg_mr cmd; 678 struct ib_mr *mr; 679 struct ib_uobject *uobj; 680 int ret = -EINVAL; 681 682 if (copy_from_user(&cmd, buf, sizeof cmd)) 683 return -EFAULT; 684 685 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext); 686 if (!uobj) 687 return -EINVAL; 688 689 mr 
= uobj->object; 690 691 ret = ib_dereg_mr(mr); 692 if (!ret) 693 uobj->live = 0; 694 695 put_uobj_write(uobj); 696 697 if (ret) 698 return ret; 699 700 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 701 702 mutex_lock(&file->mutex); 703 list_del(&uobj->list); 704 mutex_unlock(&file->mutex); 705 706 put_uobj(uobj); 707 708 return in_len; 709 } 710 711 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 712 const char __user *buf, int in_len, 713 int out_len) 714 { 715 struct ib_uverbs_create_comp_channel cmd; 716 struct ib_uverbs_create_comp_channel_resp resp; 717 struct file *filp; 718 719 if (out_len < sizeof resp) 720 return -ENOSPC; 721 722 if (copy_from_user(&cmd, buf, sizeof cmd)) 723 return -EFAULT; 724 725 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd); 726 if (IS_ERR(filp)) 727 return PTR_ERR(filp); 728 729 if (copy_to_user((void __user *) (unsigned long) cmd.response, 730 &resp, sizeof resp)) { 731 put_unused_fd(resp.fd); 732 fput(filp); 733 return -EFAULT; 734 } 735 736 fd_install(resp.fd, filp); 737 return in_len; 738 } 739 740 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, 741 const char __user *buf, int in_len, 742 int out_len) 743 { 744 struct ib_uverbs_create_cq cmd; 745 struct ib_uverbs_create_cq_resp resp; 746 struct ib_udata udata; 747 struct ib_ucq_object *obj; 748 struct ib_uverbs_event_file *ev_file = NULL; 749 struct ib_cq *cq; 750 int ret; 751 752 if (out_len < sizeof resp) 753 return -ENOSPC; 754 755 if (copy_from_user(&cmd, buf, sizeof cmd)) 756 return -EFAULT; 757 758 INIT_UDATA(&udata, buf + sizeof cmd, 759 (unsigned long) cmd.response + sizeof resp, 760 in_len - sizeof cmd, out_len - sizeof resp); 761 762 if (cmd.comp_vector >= file->device->num_comp_vectors) 763 return -EINVAL; 764 765 obj = kmalloc(sizeof *obj, GFP_KERNEL); 766 if (!obj) 767 return -ENOMEM; 768 769 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); 770 down_write(&obj->uobject.mutex); 771 772 if (cmd.comp_channel >= 0) { 
773 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); 774 if (!ev_file) { 775 ret = -EINVAL; 776 goto err; 777 } 778 } 779 780 obj->uverbs_file = file; 781 obj->comp_events_reported = 0; 782 obj->async_events_reported = 0; 783 INIT_LIST_HEAD(&obj->comp_list); 784 INIT_LIST_HEAD(&obj->async_list); 785 786 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 787 cmd.comp_vector, 788 file->ucontext, &udata); 789 if (IS_ERR(cq)) { 790 ret = PTR_ERR(cq); 791 goto err_file; 792 } 793 794 cq->device = file->device->ib_dev; 795 cq->uobject = &obj->uobject; 796 cq->comp_handler = ib_uverbs_comp_handler; 797 cq->event_handler = ib_uverbs_cq_event_handler; 798 cq->cq_context = ev_file; 799 atomic_set(&cq->usecnt, 0); 800 801 obj->uobject.object = cq; 802 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject); 803 if (ret) 804 goto err_free; 805 806 memset(&resp, 0, sizeof resp); 807 resp.cq_handle = obj->uobject.id; 808 resp.cqe = cq->cqe; 809 810 if (copy_to_user((void __user *) (unsigned long) cmd.response, 811 &resp, sizeof resp)) { 812 ret = -EFAULT; 813 goto err_copy; 814 } 815 816 mutex_lock(&file->mutex); 817 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list); 818 mutex_unlock(&file->mutex); 819 820 obj->uobject.live = 1; 821 822 up_write(&obj->uobject.mutex); 823 824 return in_len; 825 826 err_copy: 827 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); 828 829 err_free: 830 ib_destroy_cq(cq); 831 832 err_file: 833 if (ev_file) 834 ib_uverbs_release_ucq(file, ev_file, obj); 835 836 err: 837 put_uobj_write(&obj->uobject); 838 return ret; 839 } 840 841 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, 842 const char __user *buf, int in_len, 843 int out_len) 844 { 845 struct ib_uverbs_resize_cq cmd; 846 struct ib_uverbs_resize_cq_resp resp; 847 struct ib_udata udata; 848 struct ib_cq *cq; 849 int ret = -EINVAL; 850 851 if (copy_from_user(&cmd, buf, sizeof cmd)) 852 return -EFAULT; 853 854 INIT_UDATA(&udata, buf + sizeof cmd, 855 
(unsigned long) cmd.response + sizeof resp, 856 in_len - sizeof cmd, out_len - sizeof resp); 857 858 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 859 if (!cq) 860 return -EINVAL; 861 862 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); 863 if (ret) 864 goto out; 865 866 resp.cqe = cq->cqe; 867 868 if (copy_to_user((void __user *) (unsigned long) cmd.response, 869 &resp, sizeof resp.cqe)) 870 ret = -EFAULT; 871 872 out: 873 put_cq_read(cq); 874 875 return ret ? ret : in_len; 876 } 877 878 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 879 const char __user *buf, int in_len, 880 int out_len) 881 { 882 struct ib_uverbs_poll_cq cmd; 883 struct ib_uverbs_poll_cq_resp *resp; 884 struct ib_cq *cq; 885 struct ib_wc *wc; 886 int ret = 0; 887 int i; 888 int rsize; 889 890 if (copy_from_user(&cmd, buf, sizeof cmd)) 891 return -EFAULT; 892 893 wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); 894 if (!wc) 895 return -ENOMEM; 896 897 rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); 898 resp = kmalloc(rsize, GFP_KERNEL); 899 if (!resp) { 900 ret = -ENOMEM; 901 goto out_wc; 902 } 903 904 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 905 if (!cq) { 906 ret = -EINVAL; 907 goto out; 908 } 909 910 resp->count = ib_poll_cq(cq, cmd.ne, wc); 911 912 put_cq_read(cq); 913 914 for (i = 0; i < resp->count; i++) { 915 resp->wc[i].wr_id = wc[i].wr_id; 916 resp->wc[i].status = wc[i].status; 917 resp->wc[i].opcode = wc[i].opcode; 918 resp->wc[i].vendor_err = wc[i].vendor_err; 919 resp->wc[i].byte_len = wc[i].byte_len; 920 resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data; 921 resp->wc[i].qp_num = wc[i].qp->qp_num; 922 resp->wc[i].src_qp = wc[i].src_qp; 923 resp->wc[i].wc_flags = wc[i].wc_flags; 924 resp->wc[i].pkey_index = wc[i].pkey_index; 925 resp->wc[i].slid = wc[i].slid; 926 resp->wc[i].sl = wc[i].sl; 927 resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; 928 resp->wc[i].port_num = wc[i].port_num; 929 } 930 931 if (copy_to_user((void __user *) 
(unsigned long) cmd.response, resp, rsize)) 932 ret = -EFAULT; 933 934 out: 935 kfree(resp); 936 937 out_wc: 938 kfree(wc); 939 return ret ? ret : in_len; 940 } 941 942 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, 943 const char __user *buf, int in_len, 944 int out_len) 945 { 946 struct ib_uverbs_req_notify_cq cmd; 947 struct ib_cq *cq; 948 949 if (copy_from_user(&cmd, buf, sizeof cmd)) 950 return -EFAULT; 951 952 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 953 if (!cq) 954 return -EINVAL; 955 956 ib_req_notify_cq(cq, cmd.solicited_only ? 957 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 958 959 put_cq_read(cq); 960 961 return in_len; 962 } 963 964 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, 965 const char __user *buf, int in_len, 966 int out_len) 967 { 968 struct ib_uverbs_destroy_cq cmd; 969 struct ib_uverbs_destroy_cq_resp resp; 970 struct ib_uobject *uobj; 971 struct ib_cq *cq; 972 struct ib_ucq_object *obj; 973 struct ib_uverbs_event_file *ev_file; 974 int ret = -EINVAL; 975 976 if (copy_from_user(&cmd, buf, sizeof cmd)) 977 return -EFAULT; 978 979 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); 980 if (!uobj) 981 return -EINVAL; 982 cq = uobj->object; 983 ev_file = cq->cq_context; 984 obj = container_of(cq->uobject, struct ib_ucq_object, uobject); 985 986 ret = ib_destroy_cq(cq); 987 if (!ret) 988 uobj->live = 0; 989 990 put_uobj_write(uobj); 991 992 if (ret) 993 return ret; 994 995 idr_remove_uobj(&ib_uverbs_cq_idr, uobj); 996 997 mutex_lock(&file->mutex); 998 list_del(&uobj->list); 999 mutex_unlock(&file->mutex); 1000 1001 ib_uverbs_release_ucq(file, ev_file, obj); 1002 1003 memset(&resp, 0, sizeof resp); 1004 resp.comp_events_reported = obj->comp_events_reported; 1005 resp.async_events_reported = obj->async_events_reported; 1006 1007 put_uobj(uobj); 1008 1009 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1010 &resp, sizeof resp)) 1011 return -EFAULT; 1012 1013 return in_len; 1014 } 1015 
1016 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, 1017 const char __user *buf, int in_len, 1018 int out_len) 1019 { 1020 struct ib_uverbs_create_qp cmd; 1021 struct ib_uverbs_create_qp_resp resp; 1022 struct ib_udata udata; 1023 struct ib_uqp_object *obj; 1024 struct ib_pd *pd; 1025 struct ib_cq *scq, *rcq; 1026 struct ib_srq *srq; 1027 struct ib_qp *qp; 1028 struct ib_qp_init_attr attr; 1029 int ret; 1030 1031 if (out_len < sizeof resp) 1032 return -ENOSPC; 1033 1034 if (copy_from_user(&cmd, buf, sizeof cmd)) 1035 return -EFAULT; 1036 1037 INIT_UDATA(&udata, buf + sizeof cmd, 1038 (unsigned long) cmd.response + sizeof resp, 1039 in_len - sizeof cmd, out_len - sizeof resp); 1040 1041 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1042 if (!obj) 1043 return -ENOMEM; 1044 1045 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); 1046 down_write(&obj->uevent.uobject.mutex); 1047 1048 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; 1049 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1050 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0); 1051 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? 1052 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1); 1053 1054 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { 1055 ret = -EINVAL; 1056 goto err_put; 1057 } 1058 1059 attr.event_handler = ib_uverbs_qp_event_handler; 1060 attr.qp_context = file; 1061 attr.send_cq = scq; 1062 attr.recv_cq = rcq; 1063 attr.srq = srq; 1064 attr.sq_sig_type = cmd.sq_sig_all ? 
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 1065 attr.qp_type = cmd.qp_type; 1066 attr.create_flags = 0; 1067 1068 attr.cap.max_send_wr = cmd.max_send_wr; 1069 attr.cap.max_recv_wr = cmd.max_recv_wr; 1070 attr.cap.max_send_sge = cmd.max_send_sge; 1071 attr.cap.max_recv_sge = cmd.max_recv_sge; 1072 attr.cap.max_inline_data = cmd.max_inline_data; 1073 1074 obj->uevent.events_reported = 0; 1075 INIT_LIST_HEAD(&obj->uevent.event_list); 1076 INIT_LIST_HEAD(&obj->mcast_list); 1077 1078 qp = pd->device->create_qp(pd, &attr, &udata); 1079 if (IS_ERR(qp)) { 1080 ret = PTR_ERR(qp); 1081 goto err_put; 1082 } 1083 1084 qp->device = pd->device; 1085 qp->pd = pd; 1086 qp->send_cq = attr.send_cq; 1087 qp->recv_cq = attr.recv_cq; 1088 qp->srq = attr.srq; 1089 qp->uobject = &obj->uevent.uobject; 1090 qp->event_handler = attr.event_handler; 1091 qp->qp_context = attr.qp_context; 1092 qp->qp_type = attr.qp_type; 1093 atomic_inc(&pd->usecnt); 1094 atomic_inc(&attr.send_cq->usecnt); 1095 atomic_inc(&attr.recv_cq->usecnt); 1096 if (attr.srq) 1097 atomic_inc(&attr.srq->usecnt); 1098 1099 obj->uevent.uobject.object = qp; 1100 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1101 if (ret) 1102 goto err_destroy; 1103 1104 memset(&resp, 0, sizeof resp); 1105 resp.qpn = qp->qp_num; 1106 resp.qp_handle = obj->uevent.uobject.id; 1107 resp.max_recv_sge = attr.cap.max_recv_sge; 1108 resp.max_send_sge = attr.cap.max_send_sge; 1109 resp.max_recv_wr = attr.cap.max_recv_wr; 1110 resp.max_send_wr = attr.cap.max_send_wr; 1111 resp.max_inline_data = attr.cap.max_inline_data; 1112 1113 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1114 &resp, sizeof resp)) { 1115 ret = -EFAULT; 1116 goto err_copy; 1117 } 1118 1119 put_pd_read(pd); 1120 put_cq_read(scq); 1121 if (rcq != scq) 1122 put_cq_read(rcq); 1123 if (srq) 1124 put_srq_read(srq); 1125 1126 mutex_lock(&file->mutex); 1127 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); 1128 mutex_unlock(&file->mutex); 1129 1130 
obj->uevent.uobject.live = 1; 1131 1132 up_write(&obj->uevent.uobject.mutex); 1133 1134 return in_len; 1135 1136 err_copy: 1137 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1138 1139 err_destroy: 1140 ib_destroy_qp(qp); 1141 1142 err_put: 1143 if (pd) 1144 put_pd_read(pd); 1145 if (scq) 1146 put_cq_read(scq); 1147 if (rcq && rcq != scq) 1148 put_cq_read(rcq); 1149 if (srq) 1150 put_srq_read(srq); 1151 1152 put_uobj_write(&obj->uevent.uobject); 1153 return ret; 1154 } 1155 1156 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, 1157 const char __user *buf, int in_len, 1158 int out_len) 1159 { 1160 struct ib_uverbs_query_qp cmd; 1161 struct ib_uverbs_query_qp_resp resp; 1162 struct ib_qp *qp; 1163 struct ib_qp_attr *attr; 1164 struct ib_qp_init_attr *init_attr; 1165 int ret; 1166 1167 if (copy_from_user(&cmd, buf, sizeof cmd)) 1168 return -EFAULT; 1169 1170 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1171 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 1172 if (!attr || !init_attr) { 1173 ret = -ENOMEM; 1174 goto out; 1175 } 1176 1177 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1178 if (!qp) { 1179 ret = -EINVAL; 1180 goto out; 1181 } 1182 1183 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 1184 1185 put_qp_read(qp); 1186 1187 if (ret) 1188 goto out; 1189 1190 memset(&resp, 0, sizeof resp); 1191 1192 resp.qp_state = attr->qp_state; 1193 resp.cur_qp_state = attr->cur_qp_state; 1194 resp.path_mtu = attr->path_mtu; 1195 resp.path_mig_state = attr->path_mig_state; 1196 resp.qkey = attr->qkey; 1197 resp.rq_psn = attr->rq_psn; 1198 resp.sq_psn = attr->sq_psn; 1199 resp.dest_qp_num = attr->dest_qp_num; 1200 resp.qp_access_flags = attr->qp_access_flags; 1201 resp.pkey_index = attr->pkey_index; 1202 resp.alt_pkey_index = attr->alt_pkey_index; 1203 resp.sq_draining = attr->sq_draining; 1204 resp.max_rd_atomic = attr->max_rd_atomic; 1205 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 1206 resp.min_rnr_timer = attr->min_rnr_timer; 1207 
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	/* Primary path address vector; is_global is reported as a boolean
	 * derived from the IB_AH_GRH flag. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	/* Alternate path address vector, marshalled identically. */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	/* On copy failure fall through to "out" with ret = -EFAULT. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/*
 * MODIFY_QP command: unmarshal an ib_qp_attr from userspace and pass it,
 * with the caller-supplied attr_mask, to the device driver's modify_qp.
 * Trailing command bytes beyond sizeof cmd are handed to the driver as
 * opaque udata.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	/* Primary path address vector from userspace. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl =
			   cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	/* is_global from userspace maps onto the IB_AH_GRH flag. */
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	/* Alternate path address vector, unmarshalled identically. */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);

	put_qp_read(qp);

	if (ret)
		goto out;

	/* Success: reuse ret to carry the byte count returned to caller. */
	ret = in_len;

out:
	kfree(attr);

	return ret;
}

/*
 * DESTROY_QP command: tear down a QP by handle.  Destruction is refused
 * with -EBUSY while multicast attachments remain, and the "live" flag is
 * cleared under the uobject's write lock so that concurrent lookups fail
 * (see the locking scheme at the top of this file).  The response carries
 * the number of async events already reported for the QP.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* Cannot destroy a QP that is still attached to multicast groups. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any queued-but-unreported async events for this QP. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * POST_SEND command: unmarshal a variable-size array of send work
 * requests (each cmd.wqe_size bytes, followed by a packed array of SGEs)
 * into a kernel ib_send_wr chain and hand it to the driver's post_send.
 * On error the response reports how many WRs precede (and include) the
 * one the driver rejected.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The input buffer must be large enough for every WR plus every
	 * scatter/gather entry the caller claims to supply. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused for each user WR in the loop below. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* Running total of SGEs must stay within what the caller
		 * sized the buffer for. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* One allocation holds the WR plus its sg_list, which is
		 * placed just past the (alignment-padded) WR header. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD sends carry an AH handle; take a read ref that
			 * is dropped in the cleanup loop at the end. */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* sg_list lives in the same allocation, right after
			 * the aligned WR header (see kmalloc above).  SGEs
			 * are packed after all WQEs in the input buffer. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp, wr, &bad_wr);
	if (ret)
		/* Count how far the driver got: bad_wr points at the WR
		 * that failed, so bad_wr ends up 1-based inclusive. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the WR chain; drop the AH read refs taken for UD sends.
	 * A WR whose AH lookup failed was stored as NULL, so the check
	 * is safe on the partial-unmarshal error path. */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

/*
 * Unmarshal a userspace array of receive work requests (wr_count WQEs of
 * wqe_size bytes each, followed by a packed SGE array) into a kernel
 * ib_recv_wr chain.  Shared by POST_RECV and POST_SRQ_RECV.  Returns the
 * chain head or an ERR_PTR; on error any partially built chain is freed.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	/* Caller's buffer must cover all claimed WQEs and SGEs. */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused for each user WR. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* WR header and its sg_list share one allocation. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Free whatever part of the chain was already built. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

/*
 * POST_RECV command: unmarshal the receive WR chain and hand it to the
 * driver's post_recv.  On driver error, resp.bad_wr reports how many WRs
 * precede (and include) the rejected one.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* bad_wr ends up as a 1-based inclusive count. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* The chain is always ours to free; drivers copy what they need. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

/*
 * POST_SRQ_RECV command: identical flow to POST_RECV, but the WR chain is
 * posted to a shared receive queue instead of a QP's receive queue.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* bad_wr ends up as a 1-based inclusive count. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
		     ret : in_len;
}

/*
 * CREATE_AH command: build an ib_ah_attr from the command, create the
 * address handle on the given PD, register it in the AH idr, and return
 * the new handle to userspace.  Follows the standard uobject protocol:
 * init_uobj + down_write, publish with live = 1, up_write.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	/* is_global from userspace maps onto the IB_AH_GRH flag. */
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Publish: lookups only succeed once live is set (see locking
	 * notes at the top of this file). */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DESTROY_AH command: destroy the AH by handle, clearing "live" under the
 * write lock before unlinking it from the idr and the context's AH list.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * ATTACH_MCAST command: attach the QP to a multicast group (gid, mlid)
 * and record the attachment on the QP's mcast_list so DESTROY_QP can
 * refuse while attachments remain.  Attaching a group that is already on
 * the list is treated as success (idempotent).
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Already attached to this group?  Report success. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret =
	      ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}

/*
 * DETACH_MCAST command: detach the QP from the multicast group and drop
 * the matching bookkeeping entry from the QP's mcast_list.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Remove the matching bookkeeping entry, if one was recorded. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}

/*
 * CREATE_SRQ command: create a shared receive queue on the given PD,
 * register it in the SRQ idr, and return its handle plus the (possibly
 * driver-adjusted) max_wr/max_sge to userspace.  Follows the standard
 * uobject protocol: init_uobj + down_write, publish with live = 1,
 * up_write.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	struct ib_uevent_object         *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_srq_init_attr          attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uobject.mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.attr.max_wr = cmd.max_wr;
	attr.attr.max_sge = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	obj->events_reported = 0;
	INIT_LIST_HEAD(&obj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the uverbs-owned fields and take a ref on the PD. */
	srq->device = pd->device;
	srq->pd = pd;
	srq->uobject = &obj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;

	if (copy_to_user((void __user *)
			 (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Publish: lookups only succeed once live is set (see locking
	 * notes at the top of this file). */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * MODIFY_SRQ command: pass the caller's max_wr/srq_limit plus attr_mask
 * to the driver's modify_srq, forwarding trailing command bytes as udata.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

/*
 * QUERY_SRQ command: query the SRQ's current attributes via ib_query_srq
 * and copy max_wr/max_sge/srq_limit back to userspace.
 */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                  *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * DESTROY_SRQ command: destroy the SRQ by handle, clearing "live" under
 * the write lock, unlinking it from the idr and the context's SRQ list,
 * flushing unreported async events, and returning the number of events
 * that were reported for it.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_srq                    *srq;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any queued-but-unreported async events for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}