/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

struct uverbs_lock_class {
	struct lock_class_key key;
	char name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
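/*
 * A minimal sketch of how the command handlers below use this scheme
 * (illustrative only; "handle" and the surrounding handler are
 * hypothetical):
 *
 *	struct ib_pd *pd = idr_read_pd(handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;	(stale id, wrong context, or not live)
 *	... use pd with its uobject rwsem held for reading ...
 *	put_pd_read(pd);	(drops the rwsem and the kref)
 */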
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
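/*
 * Typed wrappers around idr_read_uobj()/idr_write_uobj(), one pair per
 * uverbs object type.  Each *_read() helper returns the object with its
 * uobject rwsem held for reading; the matching put_*_read() releases it.
 */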
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}
static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = file->device->ib_dev->node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

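	/*
	 * The new uobject is held write-locked with live = 0, so even
	 * after idr_add_uobj() publishes its id below, concurrent
	 * lookups fail until live is set at the end of this handler.
	 */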
	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
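/*
 * An XRCD opened through a file descriptor is shared by keying it on
 * the file's inode: the rb-tree above maps inode -> xrcd, so a later
 * open of the same file (from this or another process) finds the
 * existing XRCD instead of allocating a new one.
 */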
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
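/*
 * Closing an XRCD handle only destroys the underlying XRCD when no
 * QPs still reference it (obj->refcnt) and, for inode-backed XRCDs,
 * when this was the last open keyed on that inode (xrcd->usecnt).
 */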
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.  For example,
	 * IB_ACCESS_REMOTE_WRITE by itself is rejected here;
	 * userspace must request IB_ACCESS_LOCAL_WRITE along with it.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

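	/*
	 * A negative comp_channel means the CQ has no completion
	 * channel: ev_file stays NULL below, so completion events are
	 * not delivered to userspace for this CQ.
	 */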
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* Note: only the cqe field is copied back to userspace here. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
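/*
 * Work completions are marshalled field by field into the fixed
 * userspace ABI layout (struct ib_uverbs_wc) rather than copied
 * wholesale: the in-kernel struct ib_wc differs, e.g. it holds a
 * struct ib_qp pointer where the ABI carries a qp_num.
 */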
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * Destroying a CQ reports back how many completion and async events
 * were delivered for it, so userspace can wait until it has reaped
 * that many events before tearing down its own per-CQ state.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
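/*
 * Opening a QP attaches to an existing shareable QP: an XRC TGT QP
 * created through the same XRCD, possibly by another process.
 * ib_open_qp() only takes a reference on the underlying real QP
 * rather than creating a new one.
 */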
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
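/*
 * The user buffer for a post-recv is laid out as wr_count WQEs of
 * wqe_size bytes each, followed by a packed array of all the SGEs;
 * this helper rebuilds the kernel ib_recv_wr chain from that layout,
 * with each WR's SGE list allocated inline after the WR itself.
 */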

static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
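
/*
 * On a failed post the driver points bad_wr at the first WR it did not
 * accept; the counting loops in the post handlers convert that into a
 * 1-based index for the response.  For example, a three-WR chain that
 * fails on the second reports resp.bad_wr == 2, telling userspace that
 * WR 1 was consumed while WRs 2 and 3 were not.
 */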

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		 attr;
	int				 ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid	       = cmd.attr.dlid;
	attr.sl		       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
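
/*
 * ib_uverbs_create_ah() above shows the creation idiom used throughout
 * this file: hold the new uobject's rwsem for writing while the object
 * is installed in the idr, the response is copied to userspace, and
 * the uobject is linked into the per-context list; only then is live
 * set to 1 and the rwsem dropped.  Since lookups fail until live is
 * set, a half-constructed AH is never reachable through its handle
 * even though the handle's id is already allocated.
 */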

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
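
/*
 * The (gid, lid) pairs recorded on obj->mcast_list above are why
 * ib_uverbs_destroy_qp() refuses with -EBUSY while attachments remain:
 * the list is the kernel's only record of what must be detached before
 * the QP may go away.  ib_uverbs_detach_mcast() below removes the
 * matching entry once the hardware detach succeeds.
 */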

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
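
/*
 * A flow attribute arrives from userspace as a packed, variable-length
 * blob; a sketch of the layout ib_uverbs_create_flow() walks below:
 *
 *	struct ib_kern_flow_attr hdr;	num_of_specs entries follow
 *	struct ib_kern_spec	 spec0;	each spec begins with { type, size }
 *	...				sizes vary per spec type
 *
 * kern_spec_to_ib_spec() above checks each spec's declared size
 * against the size implied by its type before copying the value and
 * mask into the ib_flow_attr being assembled for the driver.
 */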

ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_kern_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;
	int kern_attr_size;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs < 0 ||
	    cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
			 sizeof(struct ib_uverbs_cmd_hdr_ex);

	if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
	    kern_attr_size < 0 || kern_attr_size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
				   kern_attr_size)) {
			err = -EFAULT;
			goto err_free_attr;
		}
	} else {
		kern_flow_attr = &cmd.flow_attr;
		kern_attr_size = sizeof(cmd.flow_attr);
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
		kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (kern_attr_size) {
		/* leftover bytes mean the spec list was malformed */
		pr_warn("create flow failed, %d bytes left from uverb cmd\n",
			kern_attr_size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long) cmd.response,
			 &resp, sizeof(resp))) {
		err = -EFAULT;
		goto err_copy;
	}

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return in_len;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
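
/*
 * The error labels in ib_uverbs_create_flow() above unwind in strict
 * reverse order of acquisition: idr entry, flow, attribute copy, QP
 * reference, uobject, and finally the kernel copy of the user's
 * attribute blob.  Jumping to the label matching the last successful
 * step releases exactly what has been taken so far, the convention
 * every multi-step creation path in this file relies on.
 */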

ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret ? ret : in_len;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
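
/*
 * __uverbs_create_xsrq() above is the single implementation behind
 * both SRQ creation commands.  The legacy ib_uverbs_create_srq()
 * below simply repackages its command as an extended one with
 * srq_type = IB_SRQT_BASIC, so only the XRC-specific branches (XRCD
 * and CQ lookup, srqn in the response) distinguish the two paths.
 */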

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
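
/*
 * Note that ib_uverbs_modify_srq() above initializes its udata with a
 * NULL outbuf: the provider driver may consume provider-specific input
 * trailing the command, but has no response to write back.  Contrast
 * the create paths, where the outbuf points just past the fixed-size
 * response in the user's response buffer.
 */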

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_srq                    *srq;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;
	struct ib_usrq_object            *us;
	enum ib_srq_type                  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
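
/*
 * ib_uverbs_destroy_srq() above closes with the destruction idiom used
 * for every uobject in this file: clear live under the write lock so
 * new lookups fail, drop the lock, remove the idr entry and list
 * linkage, release any queued events, and let the final put_uobj()
 * free the storage once concurrent readers have dropped their
 * references (see the locking comment at the top of this file).
 */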