/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

struct uverbs_lock_class {
	struct lock_class_key key;
	char name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
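/*
 * A typical command handler therefore brackets its work with the
 * helpers defined below.  Illustrative sketch only, using the PD
 * helpers as an example:
 *
 *	pd = idr_read_pd(handle, context);   (kref held, rwsem read-locked)
 *	if (!pd)
 *		return -EINVAL;              (stale handle or object being destroyed)
 *	... use pd ...
 *	put_pd_read(pd);                     (drop the rwsem, then the kref)
 */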
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
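/*
 * Note the ordering above: resp.async_fd is only reserved with
 * get_unused_fd_flags() until every fallible step (the response copy,
 * the event handler registration) has succeeded.  fd_install()
 * publishes the event file to userspace last, because once the fd is
 * installed it can no longer be unwound on an error path.
 */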
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = file->device->ib_dev->node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
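/*
 * The rb-tree above maps an inode to its ib_xrcd so that independent
 * processes opening the same underlying file share one XRC domain.
 * The tree holds an inode reference (igrab()/iput()) for as long as
 * an entry exists, and xrcd->usecnt counts the userspace handles
 * currently sharing the domain.
 */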
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no XRCD for this inode yet; caller must pass O_CREAT */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
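/*
 * open_xrcd deliberately mirrors open(2) semantics on the inode key:
 * an existing XRC domain is reused unless O_EXCL forbids it (-EINVAL),
 * and a missing one is only allocated when O_CREAT was passed
 * (otherwise -EAGAIN).  cmd.fd == -1 requests a private, anonymous
 * domain with no inode table entry at all.
 */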
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
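/*
 * The PAGE_MASK check above requires cmd.start and cmd.hca_va to share
 * the same offset within a page: the HCA maps whole pages, so the
 * virtual address userspace will later post in work requests must line
 * up with the registered region at page granularity.
 */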
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
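/*
 * cmd.comp_channel is the fd of a completion channel previously
 * created with ib_uverbs_create_comp_channel(); a negative value means
 * the CQ has no channel, so cq_context stays NULL and no completion
 * events are delivered for it.  ib_uverbs_lookup_comp_file() takes a
 * reference on the event file, which ib_uverbs_release_ucq() drops on
 * the error path.
 */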
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
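/*
 * The poll response is laid out as a struct ib_uverbs_poll_cq_resp
 * header followed directly by resp.count packed struct ib_uverbs_wc
 * entries.  That is why completions are copied out one at a time at
 * data_ptr first, and the header with the final count is written at
 * header_ptr last.
 */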
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
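/*
 * resp.{comp,async}_events_reported tell userspace how many events
 * were actually delivered for this CQ over the completion channel and
 * the async fd, so the library can reconcile its own bookkeeping after
 * the destroy; ib_uverbs_release_ucq() has already discarded anything
 * still queued.
 */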
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
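/*
 * For IB_QPT_XRC_TGT the QP is created through ib_create_qp() and
 * lives off the XRC domain's resources, so none of the pd/cq/srq use
 * counts are bumped above; only the uxrcd refcnt is taken, which
 * close_xrcd checks in order to refuse tearing down a domain that
 * still has QPs attached.
 */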
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
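/*
 * modify_qp_mask() silently drops attribute bits that do not apply to
 * the QP type rather than failing the command: an XRC initiator has no
 * receive side, so the responder-only attributes (max_dest_rd_atomic,
 * min_rnr_timer) are ignored, and an XRC target likewise ignores the
 * requester-only retry attributes.
 */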
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: WITH_IMM also carries the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
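/*
 * The post_send ABI packs cmd.wr_count work requests of cmd.wqe_size
 * bytes each immediately after the command header, followed by
 * cmd.sge_count scatter/gather entries.  wqe_size may exceed
 * sizeof(struct ib_uverbs_send_wr), so newer userspace can pass larger
 * WRs to a kernel that only reads the prefix it understands.
 */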
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
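/*
 * Same as ib_uverbs_post_recv(), except the work requests are posted
 * to a shared receive queue rather than to a QP's own receive queue.
 */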
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
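/*
 * Create an address handle on the given PD.  The new uobject is held
 * write-locked while it is built and published in the idr, and is
 * only marked live (visible to lookups) after the response has been
 * copied back to userspace.
 */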
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
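/*
 * Convert a single user-space flow specification into its kernel
 * representation, validating that the size userspace reported for
 * the spec matches the kernel's size for that spec type.
 */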
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
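/*
 * Create a flow steering rule on a QP.  The variable-size flow
 * attribute from userspace is copied in, each of its specs is
 * converted with kern_spec_to_ib_spec(), and the resulting
 * ib_flow_attr is handed to the driver.  Because the spec headers
 * come from userspace, the parsing loop rechecks the remaining
 * buffer size before trusting each spec's self-reported size.
 */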
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow      cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject                *uobj;
	struct ib_flow                   *flow_id;
	struct ib_uverbs_flow_attr       *kern_flow_attr;
	struct ib_flow_attr              *flow_attr;
	struct ib_qp                     *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		/* err would otherwise still be 0 here; fail explicitly */
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
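/*
 * Destroy a flow steering rule and drop its uobject.
 */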
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow               *flow_id;
	struct ib_uobject            *uobj;
	int                           ret;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
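/*
 * Common implementation for creating basic and XRC SRQs.  For the
 * XRC case the XRCD and completion queue are looked up and pinned
 * for the lifetime of the SRQ; the error unwind below releases them
 * in reverse order of acquisition.
 */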
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response    = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type    = IB_SRQT_BASIC;
	xcmd.pd_handle   = cmd.pd_handle;
	xcmd.max_wr      = cmd.max_wr;
	xcmd.max_sge     = cmd.max_sge;
	xcmd.srq_limit   = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
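/*
 * Query the current attributes of an SRQ and copy them back to
 * userspace.
 */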
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                  *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_srq                    *srq;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;
	struct ib_usrq_object            *us;
	enum ib_srq_type                  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}