/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() is
 *   sufficient, but the kref reference must similarly be grabbed
 *   before rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live        = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
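/*
 * The typed helpers below wrap idr_read_uobj()/idr_write_uobj().  A
 * command handler typically does (illustrative sketch only):
 *
 *	pd = idr_read_pd(handle, file->ucontext); // kref held, rwsem read-locked
 *	... use pd ...
 *	put_pd_read(pd);                          // up_read() + kref_put()
 *
 * Destroy paths use the _write variants instead, so that they exclude
 * all readers before clearing the live flag.
 */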
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
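/*
 * The command handlers below share a common calling convention: "buf"
 * points at the command body (the write() header has already been
 * consumed by the dispatcher), the response is written back through
 * the user pointer carried in cmd.response, and a handler returns
 * in_len on success so the write() syscall reports the whole request
 * as consumed.
 */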
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
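/*
 * The query_device response mirrors struct ib_device_attr field for
 * field; note that the 64-bit device_cap_flags is deliberately
 * truncated to its lower 32 bits to fit the legacy response layout.
 */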
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
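/*
 * Object-creating handlers all follow the sequence visible below in
 * ib_uverbs_alloc_pd(): allocate the uobject, init_uobj() plus
 * down_write() so it starts out write-locked, call into the driver,
 * publish the handle via idr_add_uobj(), copy the response, link the
 * object onto the per-context list, and only then mark it live and
 * drop the write lock.  Error paths unwind in exactly reverse order.
 */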
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	struct ib_pd		   *pd;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
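/*
 * The xrcd_tree rb-tree is keyed by inode pointer: an XRC domain is
 * shared between processes by passing a file descriptor, so the file's
 * inode identifies the domain.  xrcd_table_insert() takes an inode
 * reference via igrab(); xrcd_table_delete() drops it with iput().
 */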
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
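/*
 * ib_uverbs_open_xrcd() deliberately mimics open(2) semantics on the
 * XRCD file descriptor: with O_CREAT a missing domain is created, with
 * O_EXCL an already-existing one is rejected, and cmd.fd == -1 means an
 * anonymous domain with no backing inode at all.
 */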
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
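/*
 * Helper shared with the uverbs teardown path: for an inode-backed
 * XRCD the domain is only destroyed (and its table entry dropped) once
 * the shared usecnt reaches zero; anonymous XRCDs are destroyed
 * unconditionally.
 */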
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
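/*
 * rereg_mr validates the flag-dependent arguments up front: an
 * IB_MR_REREG_TRANS request must describe a sane new mapping (nonzero
 * start, hca_va and length, with matching page offsets), and a PD
 * change swaps the usecnt references between the old and new PDs only
 * after the driver reports success.
 */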
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd		      *old_pd;
	int                            ret;
	struct ib_uobject	      *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
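/*
 * Unlike the older handlers above, alloc_mw (and create_qp further
 * down) subtract sizeof(struct ib_uverbs_cmd_hdr) when sizing udata:
 * in_len as passed to a handler still counts the command header, even
 * though buf itself already points past it.
 */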
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata		       udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
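/*
 * create_cq() is shared by the legacy write() path and the extended
 * command path.  cmd_sz tells it how much of struct
 * ib_uverbs_ex_create_cq the caller actually provided, so optional
 * fields such as "flags" are only honored when the command is long
 * enough to contain them; the cb hook copies back whichever response
 * layout the caller expects.
 */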
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle  = cmd.user_handle;
	cmd_ex.cqe          = cmd.cqe;
	cmd_ex.comp_vector  = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
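/*
 * The extended callback copies the whole extended response, trimmed to
 * resp->response_length; userspace learns from that field how much of
 * the response the kernel actually filled in.
 */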
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
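/*
 * Completions are marshalled one at a time through the fixed-layout
 * struct ib_uverbs_wc rather than copying struct ib_wc directly, since
 * the kernel-internal structure is not part of the user ABI.
 */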
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
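/*
 * destroy_cq returns the number of completion and async events that
 * were reported for this CQ, so userspace knows how many events it
 * still needs to drain from the associated channels.
 */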
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq			*cq;
	struct ib_ucq_object		*obj;
	struct ib_uverbs_event_file	*ev_file;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
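/*
 * create_qp() backs both the legacy and the extended create-QP
 * commands.  The QP type decides which handles are meaningful: XRC_TGT
 * QPs take an XRCD in place of a PD and CQs, XRC_INI QPs carry no
 * receive queue, and otherwise the receive CQ defaults to the send CQ
 * when both handles are equal.  If a separate recv CQ was already
 * read-locked, the send CQ is locked with SINGLE_DEPTH_NESTING (the
 * !!rcq argument) since both uobjects share the CQ lockdep class.
 */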
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*uninitialized_var(xrcd_uobj);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	char				*buf;
	struct ib_qp_init_attr		attr;
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd->recv_cq_handle != cmd->send_cq_handle) {
				rcq = idr_read_cq(cmd->recv_cq_handle,
						  file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					       IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	struct ib_udata			ucore;
	struct ib_udata			uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int				err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle     = cmd.user_handle;
	cmd_ex.pd_handle       = cmd.pd_handle;
	cmd_ex.send_cq_handle  = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle  = cmd.recv_cq_handle;
	cmd_ex.srq_handle      = cmd.srq_handle;
	cmd_ex.max_send_wr     = cmd.max_send_wr;
	cmd_ex.max_recv_wr     = cmd.max_recv_wr;
	cmd_ex.max_send_sge    = cmd.max_send_sge;
	cmd_ex.max_recv_sge    = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all      = cmd.sq_sig_all;
	cmd_ex.qp_type         = cmd.qp_type;
	cmd_ex.is_srq          = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
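/*
 * open_qp attaches to an existing XRC target QP, looked up by QP
 * number within the XRC domain (note that cmd.pd_handle carries the
 * XRCD handle here), instead of creating a new queue pair; the uxrcd
 * refcount then pins the domain for the lifetime of the opened QP.
 */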
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
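/*
 * query_qp heap-allocates the attribute structures (presumably to keep
 * this handler's stack frame small) and then simply transcribes the
 * result, including both the primary and alternate path address
 * vectors.
 */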
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/*
 * Strip attribute-mask bits that do not apply to the given QP type,
 * so userspace-supplied masks cannot force ignored attributes onto
 * the driver.
 */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
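
/*
 * Modify (transition) a QP.  Every attribute field is copied from the
 * command unconditionally; modify_qp_mask() above filters the
 * attribute mask so the driver only consumes the attributes that are
 * valid for this QP type.
 */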
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr,
				   modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
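
/*
 * A post-send command arrives from userspace as one contiguous buffer:
 *
 *	struct ib_uverbs_post_send cmd;            header
 *	struct ib_uverbs_send_wr   wr[wr_count];   each wqe_size bytes
 *	struct ib_uverbs_sge       sg[sge_count];
 *
 * Each user WR is converted below into the kernel WR variant implied
 * by its opcode (UD, RDMA, atomic or plain send).  alloc_wr() reserves
 * room for the WR structure plus its scatter/gather list in a single
 * allocation, with the sg_list placed at the first ib_sge-aligned
 * offset past the WR.
 */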
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			put_ah_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
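
/*
 * Parse a userspace array of receive WRs (laid out like the send path:
 * wr_count WQEs of wqe_size bytes, followed by the scatter/gather
 * entries) into a chain of kernel ib_recv_wr structures.  Returns the
 * head of the chain or an ERR_PTR(); the caller owns and frees the
 * chain.
 */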
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
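
/*
 * Same as ib_uverbs_post_recv(), except the work requests are posted
 * to an SRQ rather than to a QP's receive queue.
 */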
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
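
/*
 * Create an address handle from the userspace-supplied address vector.
 * attr.dmac is zeroed explicitly so that no uninitialized stack bytes
 * reach the driver.
 */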
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
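
/*
 * Detach a multicast group from a QP and drop the matching entry from
 * the per-QP mcast_list that ib_uverbs_destroy_qp() checks before
 * allowing the QP to be destroyed.
 */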
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
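
/*
 * Create a flow steering rule.  The variable-length spec array that
 * follows the command header is validated spec by spec with
 * kern_spec_to_ib_spec() above and repacked into a kernel
 * ib_flow_attr before being handed to the driver.
 */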
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
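
/*
 * Destroy a flow steering rule.  Unlike destroy_qp, the uobject is
 * unlinked from the idr and the context's rule_list even if the
 * driver's destroy callback failed; the driver's return code is still
 * propagated to userspace.
 */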
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow *flow_id;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
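
/*
 * Common implementation for create_srq and create_xsrq.  For
 * IB_SRQT_XRC SRQs, the XRC domain and the completion queue are looked
 * up and reference-counted in addition to the PD.
 */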
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle,
						  file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
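
/*
 * Update the limits of an existing SRQ; cmd.attr_mask selects whether
 * the driver applies max_wr, srq_limit or both.
 */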
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	struct ib_usrq_object *us;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
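
/*
 * Extended query_device.  The response is built incrementally:
 * response_length grows as each optional block (ODP caps, timestamp
 * mask, HCA core clock) is appended, and only as much as fits in the
 * caller's output buffer is reported back.
 */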
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}