/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
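/*
 * To illustrate (a sketch of the scheme above, not an additional code
 * path): a reader that has done the idr lookup and kref_get() may block
 * in down_read() while a destroyer holds the rwsem for writing.  The
 * destroyer clears "live" and releases the rwsem; the reader then gets
 * the rwsem, sees !live, and backs off via put_uobj_read().  The kfree
 * happens only when the final kref is dropped, whichever side drops it.
 */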
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
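/*
 * The helpers below implement the lookup side of the scheme: take a
 * kref under ib_uverbs_idr_lock (rejecting handles that belong to a
 * different ucontext), then acquire the object's rwsem and recheck the
 * live flag before handing the object to the caller.
 */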
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
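/*
 * The command handlers below share a calling convention: the fixed
 * command struct is copied in from "buf", any trailing bytes are
 * exposed to the driver through ib_udata, the response is written to
 * the user pointer carried in cmd.response, and in_len is returned on
 * success.
 */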
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = file->device->ib_dev->node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
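/*
 * ib_uverbs_alloc_pd() is the template for the object-creating verbs
 * below: allocate and write-lock a fresh uobject, create the verbs
 * object, publish it in the idr, copy the response out, link the
 * uobject into the per-context list, and only then set live = 1.
 * Failures unwind in reverse order.  The destroy verbs mirror this
 * sequence, clearing live under the write lock first.
 */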
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
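/*
 * XRC domains can be shared between processes: an XRCD opened on a
 * file is keyed by that file's inode in a per-device red-black tree,
 * so a second open of the same inode finds the existing ib_xrcd.
 * igrab()/iput() keep the inode pinned while a table entry exists.
 */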
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
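/*
 * cmd.fd == -1 asks for an anonymous XRCD; otherwise the fd's inode is
 * looked up in the table above, with cmd.oflags giving O_CREAT/O_EXCL
 * open(2)-like semantics for creating or insisting on a new domain.
 */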
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
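/*
 * Closing a handle destroys the underlying XRCD only when the last
 * user goes away: inode-backed domains carry a usecnt that each
 * open_xrcd bumps, and the tree entry (with its inode reference) is
 * removed once the object is actually dead.
 */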
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
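/*
 * ib_uverbs_rereg_mr() rewrites selected properties of an existing MR
 * in place, driven by cmd.flags (translation, PD, access rights).  An
 * MR with a non-zero usecnt (taken, e.g., while a memory window is
 * bound to it) is busy and may not be reregistered.
 */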
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
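/*
 * Only allocation and deallocation of memory windows go through uverbs
 * commands in this file; binding a window to an MR is done through
 * posted work requests rather than a dedicated command.
 */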
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
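/*
 * A completion channel is simply an event file: userspace gets back a
 * file descriptor it can poll and read, and CQs created with this
 * channel deliver their completion events through it.
 */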
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* only the cqe field of the response is copied back */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
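/*
 * ib_uverbs_poll_cq() marshals completions as a struct
 * ib_uverbs_poll_cq_resp header followed by resp.count packed
 * struct ib_uverbs_wc entries, each translated from the kernel
 * struct ib_wc by copy_wc_to_user().
 */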
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	struct ib_uverbs_event_file     *ev_file;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
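/*
 * ib_uverbs_create_qp()'s resource lookup depends on the QP type:
 * XRC_TGT QPs hang off an XRCD instead of a PD/CQ pair, XRC_INI QPs
 * have no receive queue, and only the remaining types may attach an
 * SRQ or a separate receive CQ.
 */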
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device               *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd                 *xrcd = NULL;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}
	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
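/*
 * ib_uverbs_open_qp() creates nothing on the device; it attaches the
 * caller to an existing XRC target QP, identified by QP number, that
 * lives in a shared XRCD.
 */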
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
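/*
 * An XRC initiator QP has no receive side, so responder-only
 * attributes are meaningless for it; an XRC target never acts as a
 * requester, so retry and initiator-depth attributes are likewise
 * irrelevant.  modify_qp_mask() silently strips the corresponding
 * mask bits instead of failing the command.
 */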
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret) {
			/* don't leak the read lock and QP reference */
			put_qp_read(qp);
			goto out;
		}
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
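/*
 * The post_send input buffer is laid out as the fixed command header,
 * then wr_count work requests of wqe_size bytes each, then all
 * scatter/gather entries back to back.  wqe_size comes from userspace
 * (it must be at least sizeof(struct ib_uverbs_send_wr)), so each WR
 * is pulled through a heap bounce buffer of that size.
 */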
		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: WRITE_WITH_IMM also needs the RDMA fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
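
/*
 * Receive work requests arrive from user space as wr_count fixed-size
 * WQEs (each wqe_size bytes, at least sizeof(struct ib_uverbs_recv_wr))
 * followed by a flat array of sge_count struct ib_uverbs_sge entries.
 * ib_uverbs_unmarshall_recv() below turns that buffer into a kernel
 * ib_recv_wr chain; each kernel WR and its scatter list are allocated
 * together in a single kmalloc.
 */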

static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
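
/*
 * Posting to a shared receive queue reuses the same unmarshalling
 * helper; the only difference from ib_uverbs_post_recv() is that the
 * resulting chain is handed to the SRQ's post_srq_recv method instead
 * of a QP's post_recv.
 */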

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		 attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}
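
	/*
	 * Translate the user-supplied address vector.  The grh.* fields
	 * below are only meaningful when is_global is set, which is also
	 * what selects IB_AH_GRH in ah_flags.
	 */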
	attr.dlid	       = cmd.attr.dlid;
	attr.sl		       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags	       = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
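
/*
 * Each attachment is mirrored in the uobject's mcast_list so that the
 * detach path below can find it again, and so ib_uverbs_destroy_qp()
 * can refuse (-EBUSY) to destroy a QP that still has multicast
 * attachments.
 */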

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
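
/*
 * The extended create_flow command carries a variable part after the
 * fixed header: an ib_uverbs_flow_attr followed by num_of_specs
 * variable-size specs, with flow_attr.size covering just the specs.
 * A minimal sketch of the buffer a user-space caller might build
 * (illustrative only; the spec structs come from <rdma/ib_user_verbs.h>):
 *
 *	struct {
 *		struct ib_uverbs_flow_attr     attr;
 *		struct ib_uverbs_flow_spec_eth eth;	// one spec
 *	} wire = {
 *		.attr.num_of_specs = 1,
 *		.attr.size	   = sizeof(wire.eth),
 *	};
 *
 * ib_uverbs_ex_create_flow() below walks the specs, converting each
 * with kern_spec_to_ib_spec() and cross-checking the declared sizes.
 */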

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_uverbs_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
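
/*
 * Flow teardown destroys the hardware rule first; the uobject is then
 * unlinked and freed, and any error from ib_destroy_flow() is passed
 * back to the caller.
 */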

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}
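
	/*
	 * Success: the read references taken on the XRCD, CQ and PD
	 * during setup can be dropped now; the new SRQ keeps those
	 * objects alive through the usecnt counters bumped above.
	 */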
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
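
/*
 * Querying needs only a read reference on the SRQ: the attributes are
 * fetched via ib_query_srq() and copied back to user space after the
 * reference is dropped, with no uobject state change.
 */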

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}