/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
    struct lock_class_key key;
    char name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations. When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem. This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading. The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag. If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr. This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed. The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object. The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
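/*
 * Illustrative sketch only (not part of the uverbs code itself): under
 * the scheme above, a lookup-and-use path and a destroy path interleave
 * safely because "live" is only checked and cleared under the rwsem.
 * Names below are shorthand for the helpers defined later in this file.
 *
 *      reader:                          destroyer:
 *        kref_get() under idr_lock        down_write(&uobj->mutex)
 *        down_read(&uobj->mutex)          driver destroy call
 *        if (!uobj->live)                 uobj->live = 0
 *            put and bail                 up_write(&uobj->mutex)
 *        ... use object ...               idr_remove(), final kref_put()
 *        up_read(), kref_put()
 */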
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
    uobj->user_handle = user_handle;
    uobj->context = context;
    kref_init(&uobj->ref);
    init_rwsem(&uobj->mutex);
    lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
    uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
    kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
    kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
    up_read(&uobj->mutex);
    put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
    up_write(&uobj->mutex);
    put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
    int ret;

    idr_preload(GFP_KERNEL);
    spin_lock(&ib_uverbs_idr_lock);

    ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
    if (ret >= 0)
        uobj->id = ret;

    spin_unlock(&ib_uverbs_idr_lock);
    idr_preload_end();

    return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
    spin_lock(&ib_uverbs_idr_lock);
    idr_remove(idr, uobj->id);
    spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
    struct ib_uobject *uobj;

    spin_lock(&ib_uverbs_idr_lock);
    uobj = idr_find(idr, id);
    if (uobj) {
        if (uobj->context == context)
            kref_get(&uobj->ref);
        else
            uobj = NULL;
    }
    spin_unlock(&ib_uverbs_idr_lock);

    return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
    struct ib_uobject *uobj;

    uobj = __idr_get_uobj(idr, id, context);
    if (!uobj)
        return NULL;

    if (nested)
        down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
    else
        down_read(&uobj->mutex);
    if (!uobj->live) {
        put_uobj_read(uobj);
        return NULL;
    }

    return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
    struct ib_uobject *uobj;

    uobj = __idr_get_uobj(idr, id, context);
    if (!uobj)
        return NULL;

    down_write(&uobj->mutex);
    if (!uobj->live) {
        put_uobj_write(uobj);
        return NULL;
    }

    return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
    struct ib_uobject *uobj;

    uobj = idr_read_uobj(idr, id, context, nested);
    return uobj ? uobj->object : NULL;
}
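/*
 * Sketch (illustrative, not compiled anywhere): a typical command
 * handler composes the helpers above as lookup -> use -> put, e.g.
 *
 *      struct ib_pd *pd = idr_read_obj(&ib_uverbs_pd_idr, handle,
 *                                      file->ucontext, 0);
 *      if (!pd)
 *              return -EINVAL;
 *      ... use pd while holding its uobject rwsem for reading ...
 *      put_uobj_read(pd->uobject);
 *
 * The typed wrappers that follow (idr_read_pd(), idr_read_cq(), ...)
 * exist so every caller gets this pattern with the right idr and cast.
 */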
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
    return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
    put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
    return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
    put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
    return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
    put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
    return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
    struct ib_uobject *uobj;

    uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
    return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
    put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
    put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
    return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
    put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
    *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
    return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
    put_uobj_read(uobj);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
    struct ib_uverbs_get_context cmd;
    struct ib_uverbs_get_context_resp resp;
    struct ib_udata udata;
    struct ib_device *ibdev = file->device->ib_dev;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    struct ib_device_attr dev_attr;
#endif
    struct ib_ucontext *ucontext;
    struct file *filp;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    mutex_lock(&file->mutex);

    if (file->ucontext) {
        ret = -EINVAL;
        goto err;
    }

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    ucontext = ibdev->alloc_ucontext(ibdev, &udata);
    if (IS_ERR(ucontext)) {
        ret = PTR_ERR(ucontext);
        goto err;
    }

    ucontext->device = ibdev;
    INIT_LIST_HEAD(&ucontext->pd_list);
    INIT_LIST_HEAD(&ucontext->mr_list);
    INIT_LIST_HEAD(&ucontext->mw_list);
    INIT_LIST_HEAD(&ucontext->cq_list);
    INIT_LIST_HEAD(&ucontext->qp_list);
    INIT_LIST_HEAD(&ucontext->srq_list);
    INIT_LIST_HEAD(&ucontext->ah_list);
    INIT_LIST_HEAD(&ucontext->xrcd_list);
    INIT_LIST_HEAD(&ucontext->rule_list);
    rcu_read_lock();
    ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
    rcu_read_unlock();
    ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    ucontext->umem_tree = RB_ROOT;
    init_rwsem(&ucontext->umem_rwsem);
    ucontext->odp_mrs_count = 0;
    INIT_LIST_HEAD(&ucontext->no_private_counters);

    ret = ib_query_device(ibdev, &dev_attr);
    if (ret)
        goto err_free;
    if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
        ucontext->invalidate_range = NULL;

#endif

    resp.num_comp_vectors = file->device->num_comp_vectors;

    ret = get_unused_fd_flags(O_CLOEXEC);
    if (ret < 0)
        goto err_free;
    resp.async_fd = ret;

    filp = ib_uverbs_alloc_event_file(file, 1);
    if (IS_ERR(filp)) {
        ret = PTR_ERR(filp);
        goto err_fd;
    }

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_file;
    }

    file->async_file = filp->private_data;

    INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
                          ib_uverbs_event_handler);
    ret = ib_register_event_handler(&file->event_handler);
    if (ret)
        goto err_file;

    kref_get(&file->async_file->ref);
    kref_get(&file->ref);
    file->ucontext = ucontext;

    fd_install(resp.async_fd, filp);

    mutex_unlock(&file->mutex);

    return in_len;

err_file:
    fput(filp);

err_fd:
    put_unused_fd(resp.async_fd);

err_free:
    put_pid(ucontext->tgid);
    ibdev->dealloc_ucontext(ucontext);

err:
    mutex_unlock(&file->mutex);
    return ret;
}
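/*
 * A note on the wire format (our summary, not normative): every legacy
 * uverbs command arrives as a fixed header struct starting at "buf",
 * optionally followed by driver-private bytes, and the kernel writes
 * the response through the user pointer carried in cmd.response. That
 * is why the handlers build the ib_udata window as
 *
 *      INIT_UDATA(&udata, buf + sizeof cmd,
 *                 (unsigned long) cmd.response + sizeof resp,
 *                 in_len - sizeof cmd, out_len - sizeof resp);
 *
 * i.e. whatever trails the common structs on each side belongs to the
 * hardware driver, not to this file.
 */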
static void copy_query_dev_fields(struct ib_uverbs_file *file,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
    resp->fw_ver = attr->fw_ver;
    resp->node_guid = file->device->ib_dev->node_guid;
    resp->sys_image_guid = attr->sys_image_guid;
    resp->max_mr_size = attr->max_mr_size;
    resp->page_size_cap = attr->page_size_cap;
    resp->vendor_id = attr->vendor_id;
    resp->vendor_part_id = attr->vendor_part_id;
    resp->hw_ver = attr->hw_ver;
    resp->max_qp = attr->max_qp;
    resp->max_qp_wr = attr->max_qp_wr;
    resp->device_cap_flags = attr->device_cap_flags;
    resp->max_sge = attr->max_sge;
    resp->max_sge_rd = attr->max_sge_rd;
    resp->max_cq = attr->max_cq;
    resp->max_cqe = attr->max_cqe;
    resp->max_mr = attr->max_mr;
    resp->max_pd = attr->max_pd;
    resp->max_qp_rd_atom = attr->max_qp_rd_atom;
    resp->max_ee_rd_atom = attr->max_ee_rd_atom;
    resp->max_res_rd_atom = attr->max_res_rd_atom;
    resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
    resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
    resp->atomic_cap = attr->atomic_cap;
    resp->max_ee = attr->max_ee;
    resp->max_rdd = attr->max_rdd;
    resp->max_mw = attr->max_mw;
    resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
    resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
    resp->max_mcast_grp = attr->max_mcast_grp;
    resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
    resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
    resp->max_ah = attr->max_ah;
    resp->max_fmr = attr->max_fmr;
    resp->max_map_per_fmr = attr->max_map_per_fmr;
    resp->max_srq = attr->max_srq;
    resp->max_srq_wr = attr->max_srq_wr;
    resp->max_srq_sge = attr->max_srq_sge;
    resp->max_pkeys = attr->max_pkeys;
    resp->local_ca_ack_delay = attr->local_ca_ack_delay;
    resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
    struct ib_uverbs_query_device cmd;
    struct ib_uverbs_query_device_resp resp;
    struct ib_device_attr attr;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    ret = ib_query_device(file->device->ib_dev, &attr);
    if (ret)
        return ret;

    memset(&resp, 0, sizeof resp);
    copy_query_dev_fields(file, &resp, &attr);

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp))
        return -EFAULT;

    return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
    struct ib_uverbs_query_port cmd;
    struct ib_uverbs_query_port_resp resp;
    struct ib_port_attr attr;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
    if (ret)
        return ret;

    memset(&resp, 0, sizeof resp);

    resp.state = attr.state;
    resp.max_mtu = attr.max_mtu;
    resp.active_mtu = attr.active_mtu;
    resp.gid_tbl_len = attr.gid_tbl_len;
    resp.port_cap_flags = attr.port_cap_flags;
    resp.max_msg_sz = attr.max_msg_sz;
    resp.bad_pkey_cntr = attr.bad_pkey_cntr;
    resp.qkey_viol_cntr = attr.qkey_viol_cntr;
    resp.pkey_tbl_len = attr.pkey_tbl_len;
    resp.lid = attr.lid;
    resp.sm_lid = attr.sm_lid;
    resp.lmc = attr.lmc;
    resp.max_vl_num = attr.max_vl_num;
    resp.sm_sl = attr.sm_sl;
    resp.subnet_timeout = attr.subnet_timeout;
    resp.init_type_reply = attr.init_type_reply;
    resp.active_width = attr.active_width;
    resp.active_speed = attr.active_speed;
    resp.phys_state = attr.phys_state;
    resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
                                               cmd.port_num);

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp))
        return -EFAULT;

    return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
    struct ib_uverbs_alloc_pd cmd;
    struct ib_uverbs_alloc_pd_resp resp;
    struct ib_udata udata;
    struct ib_uobject *uobj;
    struct ib_pd *pd;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
    if (!uobj)
        return -ENOMEM;

    init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
    down_write(&uobj->mutex);

    pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                        file->ucontext, &udata);
    if (IS_ERR(pd)) {
        ret = PTR_ERR(pd);
        goto err;
    }

    pd->device = file->device->ib_dev;
    pd->uobject = uobj;
    atomic_set(&pd->usecnt, 0);

    uobj->object = pd;
    ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
    if (ret)
        goto err_idr;

    memset(&resp, 0, sizeof resp);
    resp.pd_handle = uobj->id;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_copy;
    }

    mutex_lock(&file->mutex);
    list_add_tail(&uobj->list, &file->ucontext->pd_list);
    mutex_unlock(&file->mutex);

    uobj->live = 1;

    up_write(&uobj->mutex);

    return in_len;

err_copy:
    idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
    ib_dealloc_pd(pd);

err:
    put_uobj_write(uobj);
    return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
    struct ib_uverbs_dealloc_pd cmd;
    struct ib_uobject *uobj;
    int ret;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
    if (!uobj)
        return -EINVAL;

    ret = ib_dealloc_pd(uobj->object);
    if (!ret)
        uobj->live = 0;

    put_uobj_write(uobj);

    if (ret)
        return ret;

    idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

    mutex_lock(&file->mutex);
    list_del(&uobj->list);
    mutex_unlock(&file->mutex);

    put_uobj(uobj);

    return in_len;
}

struct xrcd_table_entry {
    struct rb_node node;
    struct ib_xrcd *xrcd;
    struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct inode *inode,
                             struct ib_xrcd *xrcd)
{
    struct xrcd_table_entry *entry, *scan;
    struct rb_node **p = &dev->xrcd_tree.rb_node;
    struct rb_node *parent = NULL;

    entry = kmalloc(sizeof *entry, GFP_KERNEL);
    if (!entry)
        return -ENOMEM;

    entry->xrcd = xrcd;
    entry->inode = inode;

    while (*p) {
        parent = *p;
        scan = rb_entry(parent, struct xrcd_table_entry, node);

        if (inode < scan->inode) {
            p = &(*p)->rb_left;
        } else if (inode > scan->inode) {
            p = &(*p)->rb_right;
        } else {
            kfree(entry);
            return -EEXIST;
        }
    }

    rb_link_node(&entry->node, parent, p);
    rb_insert_color(&entry->node, &dev->xrcd_tree);
    igrab(inode);
    return 0;
}
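/*
 * A note on the tree above (our summary): the per-device xrcd_tree is
 * an rbtree keyed on the bare struct inode pointer, compared as an
 * integer. Two opens of the same file reach the same inode and hence
 * the same ib_xrcd, while distinct files simply land on different
 * sides of the pointer comparison; -EEXIST on an equal key is what
 * makes the insert idempotent per inode. igrab() pins the inode for
 * as long as the entry lives, and xrcd_table_delete() below drops
 * that pin with iput().
 */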
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
    struct xrcd_table_entry *entry;
    struct rb_node *p = dev->xrcd_tree.rb_node;

    while (p) {
        entry = rb_entry(p, struct xrcd_table_entry, node);

        if (inode < entry->inode)
            p = p->rb_left;
        else if (inode > entry->inode)
            p = p->rb_right;
        else
            return entry;
    }

    return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
    struct xrcd_table_entry *entry;

    entry = xrcd_table_search(dev, inode);
    if (!entry)
        return NULL;

    return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
    struct xrcd_table_entry *entry;

    entry = xrcd_table_search(dev, inode);
    if (entry) {
        iput(inode);
        rb_erase(&entry->node, &dev->xrcd_tree);
        kfree(entry);
    }
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
    struct ib_uverbs_open_xrcd cmd;
    struct ib_uverbs_open_xrcd_resp resp;
    struct ib_udata udata;
    struct ib_uxrcd_object *obj;
    struct ib_xrcd *xrcd = NULL;
    struct fd f = {NULL, 0};
    struct inode *inode = NULL;
    int ret = 0;
    int new_xrcd = 0;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    mutex_lock(&file->device->xrcd_tree_mutex);

    if (cmd.fd != -1) {
        /* search for file descriptor */
        f = fdget(cmd.fd);
        if (!f.file) {
            ret = -EBADF;
            goto err_tree_mutex_unlock;
        }

        inode = file_inode(f.file);
        xrcd = find_xrcd(file->device, inode);
        if (!xrcd && !(cmd.oflags & O_CREAT)) {
            /* no file descriptor. Need CREATE flag */
            ret = -EAGAIN;
            goto err_tree_mutex_unlock;
        }

        if (xrcd && cmd.oflags & O_EXCL) {
            ret = -EINVAL;
            goto err_tree_mutex_unlock;
        }
    }

    obj = kmalloc(sizeof *obj, GFP_KERNEL);
    if (!obj) {
        ret = -ENOMEM;
        goto err_tree_mutex_unlock;
    }

    init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

    down_write(&obj->uobject.mutex);

    if (!xrcd) {
        xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
                                                file->ucontext, &udata);
        if (IS_ERR(xrcd)) {
            ret = PTR_ERR(xrcd);
            goto err;
        }

        xrcd->inode = inode;
        xrcd->device = file->device->ib_dev;
        atomic_set(&xrcd->usecnt, 0);
        mutex_init(&xrcd->tgt_qp_mutex);
        INIT_LIST_HEAD(&xrcd->tgt_qp_list);
        new_xrcd = 1;
    }

    atomic_set(&obj->refcnt, 0);
    obj->uobject.object = xrcd;
    ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
    if (ret)
        goto err_idr;

    memset(&resp, 0, sizeof resp);
    resp.xrcd_handle = obj->uobject.id;

    if (inode) {
        if (new_xrcd) {
            /* create new inode/xrcd table entry */
            ret = xrcd_table_insert(file->device, inode, xrcd);
            if (ret)
                goto err_insert_xrcd;
        }
        atomic_inc(&xrcd->usecnt);
    }

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_copy;
    }

    if (f.file)
        fdput(f);

    mutex_lock(&file->mutex);
    list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
    mutex_unlock(&file->mutex);

    obj->uobject.live = 1;
    up_write(&obj->uobject.mutex);

    mutex_unlock(&file->device->xrcd_tree_mutex);
    return in_len;

err_copy:
    if (inode) {
        if (new_xrcd)
            xrcd_table_delete(file->device, inode);
        atomic_dec(&xrcd->usecnt);
    }

err_insert_xrcd:
    idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
    ib_dealloc_xrcd(xrcd);

err:
    put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
    if (f.file)
        fdput(f);

    mutex_unlock(&file->device->xrcd_tree_mutex);

    return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
    struct ib_uverbs_close_xrcd cmd;
    struct ib_uobject *uobj;
    struct ib_xrcd *xrcd = NULL;
    struct inode *inode = NULL;
    struct ib_uxrcd_object *obj;
    int live;
    int ret = 0;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    mutex_lock(&file->device->xrcd_tree_mutex);
    uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
    if (!uobj) {
        ret = -EINVAL;
        goto out;
    }

    xrcd = uobj->object;
    inode = xrcd->inode;
    obj = container_of(uobj, struct ib_uxrcd_object, uobject);
    if (atomic_read(&obj->refcnt)) {
        put_uobj_write(uobj);
        ret = -EBUSY;
        goto out;
    }

    if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
        ret = ib_dealloc_xrcd(uobj->object);
        if (!ret)
            uobj->live = 0;
    }

    live = uobj->live;
    if (inode && ret)
        atomic_inc(&xrcd->usecnt);

    put_uobj_write(uobj);

    if (ret)
        goto out;

    if (inode && !live)
        xrcd_table_delete(file->device, inode);

    idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
    mutex_lock(&file->mutex);
    list_del(&uobj->list);
    mutex_unlock(&file->mutex);

    put_uobj(uobj);
    ret = in_len;

out:
    mutex_unlock(&file->device->xrcd_tree_mutex);
    return ret;
}
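/*
 * XRCD lifetime, as we read the two functions above (illustrative
 * summary): when the XRCD is backed by an inode, xrcd->usecnt counts
 * userspace opens, so close only calls ib_dealloc_xrcd() when
 * atomic_dec_and_test() sees the last opener (or when there is no
 * inode at all). A failed dealloc restores the count, so e.g. with
 * usecnt == 2 a single close just drops it to 1 and leaves the
 * inode/xrcd table entry in place for the remaining handle.
 */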
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
    struct inode *inode;

    inode = xrcd->inode;
    if (inode && !atomic_dec_and_test(&xrcd->usecnt))
        return;

    ib_dealloc_xrcd(xrcd);

    if (inode)
        xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
    struct ib_uverbs_reg_mr cmd;
    struct ib_uverbs_reg_mr_resp resp;
    struct ib_udata udata;
    struct ib_uobject *uobj;
    struct ib_pd *pd;
    struct ib_mr *mr;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
        return -EINVAL;

    ret = ib_check_mr_access(cmd.access_flags);
    if (ret)
        return ret;

    uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
    if (!uobj)
        return -ENOMEM;

    init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
    down_write(&uobj->mutex);

    pd = idr_read_pd(cmd.pd_handle, file->ucontext);
    if (!pd) {
        ret = -EINVAL;
        goto err_free;
    }

    if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
        struct ib_device_attr attr;

        ret = ib_query_device(pd->device, &attr);
        if (ret || !(attr.device_cap_flags &
                     IB_DEVICE_ON_DEMAND_PAGING)) {
            pr_debug("ODP support not available\n");
            ret = -EINVAL;
            goto err_put;
        }
    }

    mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                 cmd.access_flags, &udata);
    if (IS_ERR(mr)) {
        ret = PTR_ERR(mr);
        goto err_put;
    }

    mr->device = pd->device;
    mr->pd = pd;
    mr->uobject = uobj;
    atomic_inc(&pd->usecnt);
    atomic_set(&mr->usecnt, 0);

    uobj->object = mr;
    ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
    if (ret)
        goto err_unreg;

    memset(&resp, 0, sizeof resp);
    resp.lkey = mr->lkey;
    resp.rkey = mr->rkey;
    resp.mr_handle = uobj->id;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_copy;
    }

    put_pd_read(pd);

    mutex_lock(&file->mutex);
    list_add_tail(&uobj->list, &file->ucontext->mr_list);
    mutex_unlock(&file->mutex);

    uobj->live = 1;

    up_write(&uobj->mutex);

    return in_len;

err_copy:
    idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
    ib_dereg_mr(mr);

err_put:
    put_pd_read(pd);

err_free:
    put_uobj_write(uobj);
    return ret;
}
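/*
 * Worked example for the page-offset check in ib_uverbs_reg_mr()
 * (ours, assuming 4 KiB pages): (start & ~PAGE_MASK) is the offset of
 * the buffer within its page, so with start = 0x10000f00 the low 12
 * bits are 0xf00 and any hca_va whose low 12 bits are also 0xf00
 * (e.g. 0x7f00) passes, while hca_va = 0x8000 fails with -EINVAL.
 * The HCA maps whole pages, so the virtual address the HCA uses must
 * sit at the same offset inside a page as the CPU buffer.
 */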
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
    struct ib_uverbs_rereg_mr cmd;
    struct ib_uverbs_rereg_mr_resp resp;
    struct ib_udata udata;
    struct ib_pd *pd = NULL;
    struct ib_mr *mr;
    struct ib_pd *old_pd;
    int ret;
    struct ib_uobject *uobj;

    if (out_len < sizeof(resp))
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof(cmd)))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof(cmd),
               (unsigned long) cmd.response + sizeof(resp),
               in_len - sizeof(cmd), out_len - sizeof(resp));

    if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
        return -EINVAL;

    if ((cmd.flags & IB_MR_REREG_TRANS) &&
        (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
         (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
        return -EINVAL;

    uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
                          file->ucontext);

    if (!uobj)
        return -EINVAL;

    mr = uobj->object;

    if (cmd.flags & IB_MR_REREG_ACCESS) {
        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
            goto put_uobjs;
    }

    if (cmd.flags & IB_MR_REREG_PD) {
        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
            ret = -EINVAL;
            goto put_uobjs;
        }
    }

    if (atomic_read(&mr->usecnt)) {
        ret = -EBUSY;
        goto put_uobj_pd;
    }

    old_pd = mr->pd;
    ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                    cmd.length, cmd.hca_va,
                                    cmd.access_flags, pd, &udata);
    if (!ret) {
        if (cmd.flags & IB_MR_REREG_PD) {
            atomic_inc(&pd->usecnt);
            mr->pd = pd;
            atomic_dec(&old_pd->usecnt);
        }
    } else {
        goto put_uobj_pd;
    }

    memset(&resp, 0, sizeof(resp));
    resp.lkey = mr->lkey;
    resp.rkey = mr->rkey;

    if (copy_to_user((void __user *)(unsigned long)cmd.response,
                     &resp, sizeof(resp)))
        ret = -EFAULT;
    else
        ret = in_len;

put_uobj_pd:
    if (cmd.flags & IB_MR_REREG_PD)
        put_pd_read(pd);

put_uobjs:
    put_uobj_write(mr->uobject);

    return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
    struct ib_uverbs_dereg_mr cmd;
    struct ib_mr *mr;
    struct ib_uobject *uobj;
    int ret = -EINVAL;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
    if (!uobj)
        return -EINVAL;

    mr = uobj->object;

    ret = ib_dereg_mr(mr);
    if (!ret)
        uobj->live = 0;

    put_uobj_write(uobj);

    if (ret)
        return ret;

    idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

    mutex_lock(&file->mutex);
    list_del(&uobj->list);
    mutex_unlock(&file->mutex);

    put_uobj(uobj);

    return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
    struct ib_uverbs_alloc_mw cmd;
    struct ib_uverbs_alloc_mw_resp resp;
    struct ib_uobject *uobj;
    struct ib_pd *pd;
    struct ib_mw *mw;
    int ret;

    if (out_len < sizeof(resp))
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof(cmd)))
        return -EFAULT;

    uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
    if (!uobj)
        return -ENOMEM;

    init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
    down_write(&uobj->mutex);

    pd = idr_read_pd(cmd.pd_handle, file->ucontext);
    if (!pd) {
        ret = -EINVAL;
        goto err_free;
    }

    mw = pd->device->alloc_mw(pd, cmd.mw_type);
    if (IS_ERR(mw)) {
        ret = PTR_ERR(mw);
        goto err_put;
    }

    mw->device = pd->device;
    mw->pd = pd;
    mw->uobject = uobj;
    atomic_inc(&pd->usecnt);

    uobj->object = mw;
    ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
    if (ret)
        goto err_unalloc;

    memset(&resp, 0, sizeof(resp));
    resp.rkey = mw->rkey;
    resp.mw_handle = uobj->id;

    if (copy_to_user((void __user *)(unsigned long)cmd.response,
                     &resp, sizeof(resp))) {
        ret = -EFAULT;
        goto err_copy;
    }

    put_pd_read(pd);

    mutex_lock(&file->mutex);
    list_add_tail(&uobj->list, &file->ucontext->mw_list);
    mutex_unlock(&file->mutex);

    uobj->live = 1;

    up_write(&uobj->mutex);

    return in_len;

err_copy:
    idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
    ib_dealloc_mw(mw);

err_put:
    put_pd_read(pd);

err_free:
    put_uobj_write(uobj);
    return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
    struct ib_uverbs_dealloc_mw cmd;
    struct ib_mw *mw;
    struct ib_uobject *uobj;
    int ret = -EINVAL;

    if (copy_from_user(&cmd, buf, sizeof(cmd)))
        return -EFAULT;

    uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
    if (!uobj)
        return -EINVAL;

    mw = uobj->object;

    ret = ib_dealloc_mw(mw);
    if (!ret)
        uobj->live = 0;

    put_uobj_write(uobj);

    if (ret)
        return ret;

    idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

    mutex_lock(&file->mutex);
    list_del(&uobj->list);
    mutex_unlock(&file->mutex);

    put_uobj(uobj);

    return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
    struct ib_uverbs_create_comp_channel cmd;
    struct ib_uverbs_create_comp_channel_resp resp;
    struct file *filp;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    ret = get_unused_fd_flags(O_CLOEXEC);
    if (ret < 0)
        return ret;
    resp.fd = ret;

    filp = ib_uverbs_alloc_event_file(file, 0);
    if (IS_ERR(filp)) {
        put_unused_fd(resp.fd);
        return PTR_ERR(filp);
    }

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        put_unused_fd(resp.fd);
        fput(filp);
        return -EFAULT;
    }

    fd_install(resp.fd, filp);
    return in_len;
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
                                       struct ib_udata *ucore,
                                       struct ib_udata *uhw,
                                       struct ib_uverbs_ex_create_cq *cmd,
                                       size_t cmd_sz,
                                       int (*cb)(struct ib_uverbs_file *file,
                                                 struct ib_ucq_object *obj,
                                                 struct ib_uverbs_ex_create_cq_resp *resp,
                                                 struct ib_udata *udata,
                                                 void *context),
                                       void *context)
{
    struct ib_ucq_object *obj;
    struct ib_uverbs_event_file *ev_file = NULL;
    struct ib_cq *cq;
    int ret;
    struct ib_uverbs_ex_create_cq_resp resp;
    struct ib_cq_init_attr attr = {};

    if (cmd->comp_vector >= file->device->num_comp_vectors)
        return ERR_PTR(-EINVAL);

    obj = kmalloc(sizeof *obj, GFP_KERNEL);
    if (!obj)
        return ERR_PTR(-ENOMEM);

    init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
    down_write(&obj->uobject.mutex);

    if (cmd->comp_channel >= 0) {
        ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
        if (!ev_file) {
            ret = -EINVAL;
            goto err;
        }
    }

    obj->uverbs_file = file;
    obj->comp_events_reported = 0;
    obj->async_events_reported = 0;
    INIT_LIST_HEAD(&obj->comp_list);
    INIT_LIST_HEAD(&obj->async_list);

    attr.cqe = cmd->cqe;
    attr.comp_vector = cmd->comp_vector;

    if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
        attr.flags = cmd->flags;

    cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
                                         file->ucontext, uhw);
    if (IS_ERR(cq)) {
        ret = PTR_ERR(cq);
        goto err_file;
    }

    cq->device = file->device->ib_dev;
    cq->uobject = &obj->uobject;
    cq->comp_handler = ib_uverbs_comp_handler;
    cq->event_handler = ib_uverbs_cq_event_handler;
    cq->cq_context = ev_file;
    atomic_set(&cq->usecnt, 0);

    obj->uobject.object = cq;
    ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
    if (ret)
        goto err_free;

    memset(&resp, 0, sizeof resp);
    resp.base.cq_handle = obj->uobject.id;
    resp.base.cqe = cq->cqe;

    resp.response_length = offsetof(typeof(resp), response_length) +
        sizeof(resp.response_length);

    ret = cb(file, obj, &resp, ucore, context);
    if (ret)
        goto err_cb;

    mutex_lock(&file->mutex);
    list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
    mutex_unlock(&file->mutex);

    obj->uobject.live = 1;

    up_write(&obj->uobject.mutex);

    return obj;

err_cb:
    idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
    ib_destroy_cq(cq);

err_file:
    if (ev_file)
        ib_uverbs_release_ucq(file, ev_file, obj);

err:
    put_uobj_write(&obj->uobject);

    return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
                                  struct ib_ucq_object *obj,
                                  struct ib_uverbs_ex_create_cq_resp *resp,
                                  struct ib_udata *ucore, void *context)
{
    if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
        return -EFAULT;

    return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
    struct ib_uverbs_create_cq cmd;
    struct ib_uverbs_ex_create_cq cmd_ex;
    struct ib_uverbs_create_cq_resp resp;
    struct ib_udata ucore;
    struct ib_udata uhw;
    struct ib_ucq_object *obj;

    if (out_len < sizeof(resp))
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof(cmd)))
        return -EFAULT;

    INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

    INIT_UDATA(&uhw, buf + sizeof(cmd),
               (unsigned long)cmd.response + sizeof(resp),
               in_len - sizeof(cmd), out_len - sizeof(resp));

    memset(&cmd_ex, 0, sizeof(cmd_ex));
    cmd_ex.user_handle = cmd.user_handle;
    cmd_ex.cqe = cmd.cqe;
    cmd_ex.comp_vector = cmd.comp_vector;
    cmd_ex.comp_channel = cmd.comp_channel;

    obj = create_cq(file, &ucore, &uhw, &cmd_ex,
                    offsetof(typeof(cmd_ex), comp_channel) +
                    sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
                    NULL);

    if (IS_ERR(obj))
        return PTR_ERR(obj);

    return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
                                     struct ib_ucq_object *obj,
                                     struct ib_uverbs_ex_create_cq_resp *resp,
                                     struct ib_udata *ucore, void *context)
{
    if (ib_copy_to_udata(ucore, resp, resp->response_length))
        return -EFAULT;

    return 0;
}
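/*
 * Why two callbacks (our summary of the pattern above): the legacy
 * path copies exactly sizeof(resp->base), so old binaries see the ABI
 * they were built against, while the extended path copies
 * resp->response_length bytes, which create_cq() set to cover only
 * the fields it actually filled in. New response fields can then be
 * appended over time: an older kernel simply reports a shorter
 * response_length, and userspace is expected to check it before
 * reading any extended field.
 */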
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
    struct ib_uverbs_ex_create_cq_resp resp;
    struct ib_uverbs_ex_create_cq cmd;
    struct ib_ucq_object *obj;
    int err;

    if (ucore->inlen < sizeof(cmd))
        return -EINVAL;

    err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
    if (err)
        return err;

    if (cmd.comp_mask)
        return -EINVAL;

    if (cmd.reserved)
        return -EINVAL;

    if (ucore->outlen < (offsetof(typeof(resp), response_length) +
                         sizeof(resp.response_length)))
        return -ENOSPC;

    obj = create_cq(file, ucore, uhw, &cmd,
                    min(ucore->inlen, sizeof(cmd)),
                    ib_uverbs_ex_create_cq_cb, NULL);

    if (IS_ERR(obj))
        return PTR_ERR(obj);

    return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
    struct ib_uverbs_resize_cq cmd;
    struct ib_uverbs_resize_cq_resp resp;
    struct ib_udata udata;
    struct ib_cq *cq;
    int ret = -EINVAL;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
    if (!cq)
        return -EINVAL;

    ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
    if (ret)
        goto out;

    resp.cqe = cq->cqe;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp.cqe))
        ret = -EFAULT;

out:
    put_cq_read(cq);

    return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
    struct ib_uverbs_wc tmp;

    tmp.wr_id = wc->wr_id;
    tmp.status = wc->status;
    tmp.opcode = wc->opcode;
    tmp.vendor_err = wc->vendor_err;
    tmp.byte_len = wc->byte_len;
    tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
    tmp.qp_num = wc->qp->qp_num;
    tmp.src_qp = wc->src_qp;
    tmp.wc_flags = wc->wc_flags;
    tmp.pkey_index = wc->pkey_index;
    tmp.slid = wc->slid;
    tmp.sl = wc->sl;
    tmp.dlid_path_bits = wc->dlid_path_bits;
    tmp.port_num = wc->port_num;
    tmp.reserved = 0;

    if (copy_to_user(dest, &tmp, sizeof tmp))
        return -EFAULT;

    return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len,
                          int out_len)
{
    struct ib_uverbs_poll_cq cmd;
    struct ib_uverbs_poll_cq_resp resp;
    u8 __user *header_ptr;
    u8 __user *data_ptr;
    struct ib_cq *cq;
    struct ib_wc wc;
    int ret;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
    if (!cq)
        return -EINVAL;

    /* we copy a struct ib_uverbs_poll_cq_resp to user space */
    header_ptr = (void __user *)(unsigned long) cmd.response;
    data_ptr = header_ptr + sizeof resp;

    memset(&resp, 0, sizeof resp);
    while (resp.count < cmd.ne) {
        ret = ib_poll_cq(cq, 1, &wc);
        if (ret < 0)
            goto out_put;
        if (!ret)
            break;

        ret = copy_wc_to_user(data_ptr, &wc);
        if (ret)
            goto out_put;

        data_ptr += sizeof(struct ib_uverbs_wc);
        ++resp.count;
    }

    if (copy_to_user(header_ptr, &resp, sizeof resp)) {
        ret = -EFAULT;
        goto out_put;
    }

    ret = in_len;

out_put:
    put_cq_read(cq);
    return ret;
}
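/*
 * Layout note for ib_uverbs_poll_cq() (our reading): the user buffer
 * is a header immediately followed by an array, i.e.
 *
 *      struct ib_uverbs_poll_cq_resp resp;   <- count written last
 *      struct ib_uverbs_wc wc[resp.count];
 *
 * Completions are converted and copied out one at a time, and the
 * header is only written after the loop finishes successfully, so
 * whenever userspace sees a response at all, resp.count matches the
 * number of ib_uverbs_wc entries that were actually copied.
 */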
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
    struct ib_uverbs_req_notify_cq cmd;
    struct ib_cq *cq;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
    if (!cq)
        return -EINVAL;

    ib_req_notify_cq(cq, cmd.solicited_only ?
                     IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

    put_cq_read(cq);

    return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
    struct ib_uverbs_destroy_cq cmd;
    struct ib_uverbs_destroy_cq_resp resp;
    struct ib_uobject *uobj;
    struct ib_cq *cq;
    struct ib_ucq_object *obj;
    struct ib_uverbs_event_file *ev_file;
    int ret = -EINVAL;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
    if (!uobj)
        return -EINVAL;
    cq = uobj->object;
    ev_file = cq->cq_context;
    obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

    ret = ib_destroy_cq(cq);
    if (!ret)
        uobj->live = 0;

    put_uobj_write(uobj);

    if (ret)
        return ret;

    idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

    mutex_lock(&file->mutex);
    list_del(&uobj->list);
    mutex_unlock(&file->mutex);

    ib_uverbs_release_ucq(file, ev_file, obj);

    memset(&resp, 0, sizeof resp);
    resp.comp_events_reported = obj->comp_events_reported;
    resp.async_events_reported = obj->async_events_reported;

    put_uobj(uobj);

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp))
        return -EFAULT;

    return in_len;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
    struct ib_uverbs_create_qp cmd;
    struct ib_uverbs_create_qp_resp resp;
    struct ib_udata udata;
    struct ib_uqp_object *obj;
    struct ib_device *device;
    struct ib_pd *pd = NULL;
    struct ib_xrcd *xrcd = NULL;
    struct ib_uobject *uninitialized_var(xrcd_uobj);
    struct ib_cq *scq = NULL, *rcq = NULL;
    struct ib_srq *srq = NULL;
    struct ib_qp *qp;
    struct ib_qp_init_attr attr;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
        return -EPERM;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    obj = kzalloc(sizeof *obj, GFP_KERNEL);
    if (!obj)
        return -ENOMEM;

    init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
    down_write(&obj->uevent.uobject.mutex);

    if (cmd.qp_type == IB_QPT_XRC_TGT) {
        xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
        if (!xrcd) {
            ret = -EINVAL;
            goto err_put;
        }
        device = xrcd->device;
    } else {
        if (cmd.qp_type == IB_QPT_XRC_INI) {
            cmd.max_recv_wr = cmd.max_recv_sge = 0;
        } else {
            if (cmd.is_srq) {
                srq = idr_read_srq(cmd.srq_handle, file->ucontext);
                if (!srq || srq->srq_type != IB_SRQT_BASIC) {
                    ret = -EINVAL;
                    goto err_put;
                }
            }

            if (cmd.recv_cq_handle != cmd.send_cq_handle) {
                rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
                if (!rcq) {
                    ret = -EINVAL;
                    goto err_put;
                }
            }
        }

        scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
        rcq = rcq ?: scq;
        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd || !scq) {
            ret = -EINVAL;
            goto err_put;
        }

        device = pd->device;
    }

    attr.event_handler = ib_uverbs_qp_event_handler;
    attr.qp_context = file;
    attr.send_cq = scq;
    attr.recv_cq = rcq;
    attr.srq = srq;
    attr.xrcd = xrcd;
    attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
    attr.qp_type = cmd.qp_type;
    attr.create_flags = 0;

    attr.cap.max_send_wr = cmd.max_send_wr;
    attr.cap.max_recv_wr = cmd.max_recv_wr;
    attr.cap.max_send_sge = cmd.max_send_sge;
    attr.cap.max_recv_sge = cmd.max_recv_sge;
    attr.cap.max_inline_data = cmd.max_inline_data;

    obj->uevent.events_reported = 0;
    INIT_LIST_HEAD(&obj->uevent.event_list);
    INIT_LIST_HEAD(&obj->mcast_list);

    if (cmd.qp_type == IB_QPT_XRC_TGT)
        qp = ib_create_qp(pd, &attr);
    else
        qp = device->create_qp(pd, &attr, &udata);

    if (IS_ERR(qp)) {
        ret = PTR_ERR(qp);
        goto err_put;
    }

    if (cmd.qp_type != IB_QPT_XRC_TGT) {
        qp->real_qp = qp;
        qp->device = device;
        qp->pd = pd;
        qp->send_cq = attr.send_cq;
        qp->recv_cq = attr.recv_cq;
        qp->srq = attr.srq;
        qp->event_handler = attr.event_handler;
        qp->qp_context = attr.qp_context;
        qp->qp_type = attr.qp_type;
        atomic_set(&qp->usecnt, 0);
        atomic_inc(&pd->usecnt);
        atomic_inc(&attr.send_cq->usecnt);
        if (attr.recv_cq)
            atomic_inc(&attr.recv_cq->usecnt);
        if (attr.srq)
            atomic_inc(&attr.srq->usecnt);
    }
    qp->uobject = &obj->uevent.uobject;

    obj->uevent.uobject.object = qp;
    ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
    if (ret)
        goto err_destroy;

    memset(&resp, 0, sizeof resp);
    resp.qpn = qp->qp_num;
    resp.qp_handle = obj->uevent.uobject.id;
    resp.max_recv_sge = attr.cap.max_recv_sge;
    resp.max_send_sge = attr.cap.max_send_sge;
    resp.max_recv_wr = attr.cap.max_recv_wr;
    resp.max_send_wr = attr.cap.max_send_wr;
    resp.max_inline_data = attr.cap.max_inline_data;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_copy;
    }

    if (xrcd) {
        obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
                                  uobject);
        atomic_inc(&obj->uxrcd->refcnt);
        put_xrcd_read(xrcd_uobj);
    }

    if (pd)
        put_pd_read(pd);
    if (scq)
        put_cq_read(scq);
    if (rcq && rcq != scq)
        put_cq_read(rcq);
    if (srq)
        put_srq_read(srq);

    mutex_lock(&file->mutex);
    list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
    mutex_unlock(&file->mutex);

    obj->uevent.uobject.live = 1;

    up_write(&obj->uevent.uobject.mutex);

    return in_len;

err_copy:
    idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
    ib_destroy_qp(qp);

err_put:
    if (xrcd)
        put_xrcd_read(xrcd_uobj);
    if (pd)
        put_pd_read(pd);
    if (scq)
        put_cq_read(scq);
    if (rcq && rcq != scq)
        put_cq_read(rcq);
    if (srq)
        put_srq_read(srq);

    put_uobj_write(&obj->uevent.uobject);
    return ret;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len, int out_len)
{
    struct ib_uverbs_open_qp cmd;
    struct ib_uverbs_create_qp_resp resp;
    struct ib_udata udata;
    struct ib_uqp_object *obj;
    struct ib_xrcd *xrcd;
    struct ib_uobject *uninitialized_var(xrcd_uobj);
    struct ib_qp *qp;
    struct ib_qp_open_attr attr;
    int ret;

    if (out_len < sizeof resp)
        return -ENOSPC;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    INIT_UDATA(&udata, buf + sizeof cmd,
               (unsigned long) cmd.response + sizeof resp,
               in_len - sizeof cmd, out_len - sizeof resp);

    obj = kmalloc(sizeof *obj, GFP_KERNEL);
    if (!obj)
        return -ENOMEM;

    init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
    down_write(&obj->uevent.uobject.mutex);

    xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
    if (!xrcd) {
        ret = -EINVAL;
        goto err_put;
    }

    attr.event_handler = ib_uverbs_qp_event_handler;
    attr.qp_context = file;
    attr.qp_num = cmd.qpn;
    attr.qp_type = cmd.qp_type;

    obj->uevent.events_reported = 0;
    INIT_LIST_HEAD(&obj->uevent.event_list);
    INIT_LIST_HEAD(&obj->mcast_list);

    qp = ib_open_qp(xrcd, &attr);
    if (IS_ERR(qp)) {
        ret = PTR_ERR(qp);
        goto err_put;
    }

    qp->uobject = &obj->uevent.uobject;

    obj->uevent.uobject.object = qp;
    ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
    if (ret)
        goto err_destroy;

    memset(&resp, 0, sizeof resp);
    resp.qpn = qp->qp_num;
    resp.qp_handle = obj->uevent.uobject.id;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp)) {
        ret = -EFAULT;
        goto err_remove;
    }

    obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
    atomic_inc(&obj->uxrcd->refcnt);
    put_xrcd_read(xrcd_uobj);

    mutex_lock(&file->mutex);
    list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
    mutex_unlock(&file->mutex);

    obj->uevent.uobject.live = 1;

    up_write(&obj->uevent.uobject.mutex);

    return in_len;

err_remove:
    idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
    ib_destroy_qp(qp);

err_put:
    put_xrcd_read(xrcd_uobj);
    put_uobj_write(&obj->uevent.uobject);
    return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
    struct ib_uverbs_query_qp cmd;
    struct ib_uverbs_query_qp_resp resp;
    struct ib_qp *qp;
    struct ib_qp_attr *attr;
    struct ib_qp_init_attr *init_attr;
    int ret;

    if (copy_from_user(&cmd, buf, sizeof cmd))
        return -EFAULT;

    attr = kmalloc(sizeof *attr, GFP_KERNEL);
    init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
    if (!attr || !init_attr) {
        ret = -ENOMEM;
        goto out;
    }

    qp = idr_read_qp(cmd.qp_handle, file->ucontext);
    if (!qp) {
        ret = -EINVAL;
        goto out;
    }

    ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

    put_qp_read(qp);

    if (ret)
        goto out;

    memset(&resp, 0, sizeof resp);

    resp.qp_state = attr->qp_state;
    resp.cur_qp_state = attr->cur_qp_state;
    resp.path_mtu = attr->path_mtu;
    resp.path_mig_state = attr->path_mig_state;
    resp.qkey = attr->qkey;
    resp.rq_psn = attr->rq_psn;
    resp.sq_psn = attr->sq_psn;
    resp.dest_qp_num = attr->dest_qp_num;
    resp.qp_access_flags = attr->qp_access_flags;
    resp.pkey_index = attr->pkey_index;
    resp.alt_pkey_index = attr->alt_pkey_index;
    resp.sq_draining = attr->sq_draining;
    resp.max_rd_atomic = attr->max_rd_atomic;
    resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
    resp.min_rnr_timer = attr->min_rnr_timer;
    resp.port_num = attr->port_num;
    resp.timeout = attr->timeout;
    resp.retry_cnt = attr->retry_cnt;
    resp.rnr_retry = attr->rnr_retry;
    resp.alt_port_num = attr->alt_port_num;
    resp.alt_timeout = attr->alt_timeout;

    memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
    resp.dest.flow_label = attr->ah_attr.grh.flow_label;
    resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
    resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
    resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
    resp.dest.dlid = attr->ah_attr.dlid;
    resp.dest.sl = attr->ah_attr.sl;
    resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
    resp.dest.static_rate = attr->ah_attr.static_rate;
    resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
    resp.dest.port_num = attr->ah_attr.port_num;

    memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
    resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
    resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
    resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
    resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
    resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
    resp.alt_dest.sl = attr->alt_ah_attr.sl;
    resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
    resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
    resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
    resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

    resp.max_send_wr = init_attr->cap.max_send_wr;
    resp.max_recv_wr = init_attr->cap.max_recv_wr;
    resp.max_send_sge = init_attr->cap.max_send_sge;
    resp.max_recv_sge = init_attr->cap.max_recv_sge;
    resp.max_inline_data = init_attr->cap.max_inline_data;
    resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

    if (copy_to_user((void __user *) (unsigned long) cmd.response,
                     &resp, sizeof resp))
        ret = -EFAULT;

out:
    kfree(attr);
    kfree(init_attr);

    return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
    switch (qp_type) {
    case IB_QPT_XRC_INI:
        return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
    case IB_QPT_XRC_TGT:
        return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
                        IB_QP_RNR_RETRY);
    default:
        return mask;
    }
}
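/*
 * Example of the mask filtering above (ours): an XRC initiator QP has
 * no responder side, so responder-only attributes are stripped before
 * the mask reaches the driver, e.g.
 *
 *      modify_qp_mask(IB_QPT_XRC_INI,
 *                     IB_QP_STATE | IB_QP_MIN_RNR_TIMER)
 *              == IB_QP_STATE
 *
 * while for an ordinary IB_QPT_RC QP the same mask passes through
 * unchanged via the default case.
 */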

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: the RDMA address fields
				 * are needed as well */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
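
/*
 * Structurally identical to ib_uverbs_post_recv() above, except that
 * the work requests are posted to a shared receive queue rather than a
 * QP's own receive queue, so no real_qp indirection is involved.
 */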

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
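
/*
 * Address handle creation: the parent PD is held with a read reference
 * only for the duration of ib_create_ah(), and the new uobject is added
 * to ah_list and marked live only after the handle has been copied back
 * to userspace, so a failed copy_to_user() can still unwind completely
 * via ib_destroy_ah().
 */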

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	attr.vlan_id           = 0;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
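
/*
 * Multicast attach/detach take the QP write-locked because they mutate
 * the per-QP mcast_list.  Attaching the same <gid, mlid> pair twice is
 * a silently successful no-op, as the duplicate scan below shows.
 */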

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
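
/*
 * Extended (ex) commands carry their input and output through ib_udata
 * rather than the legacy flat buffer, which lets the flow_attr be
 * trailed by a variable number of flow specs.  The loop below walks
 * that variable-size area, converting each userspace spec with
 * kern_spec_to_ib_spec() while cross-checking the advertised
 * flow_attr.size against what is actually consumed.
 */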

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
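
/*
 * Unlike the legacy destroy verbs, the extended destroy-flow command
 * returns 0 on success rather than the consumed input length (the ex
 * command path keeps track of buffer lengths in the ib_udata
 * structures).
 */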

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow *flow_id;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
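
/*
 * Shared implementation behind both ib_uverbs_create_srq() and
 * ib_uverbs_create_xsrq().  For IB_SRQT_XRC the SRQ additionally pins
 * its XRC domain and completion queue, taking usecnt references that
 * are only dropped when the SRQ itself is destroyed.
 */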

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response    = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type    = IB_SRQT_BASIC;
	xcmd.pd_handle   = cmd.pd_handle;
	xcmd.max_wr      = cmd.max_wr;
	xcmd.max_sge     = cmd.max_sge;
	xcmd.srq_limit   = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
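
/*
 * ib_uverbs_query_srq() needs only a short-lived read reference on the
 * SRQ.  ib_uverbs_destroy_srq() below additionally drops the XRC domain
 * reference taken in __uverbs_create_xsrq() when the SRQ being torn
 * down is of type IB_SRQT_XRC.
 */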

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	struct ib_usrq_object *us;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
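
/*
 * The extended query-device response is designed to grow: each optional
 * trailer (odp_caps, timestamp_mask, hca_core_clock) is appended and
 * accounted in resp.response_length only if the caller's output buffer
 * is large enough for it, so older userspace binaries keep working
 * against newer kernels.
 */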

int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp;
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr;
	struct ib_device *device;
	int err;

	device = file->device->ib_dev;
	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	memset(&attr, 0, sizeof(attr));

	err = device->query_device(device, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, &resp.base, &attr);
	resp.comp_mask = 0;

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.odp_caps.reserved = 0;
#else
	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (err)
		return err;

	return 0;
}