/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

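/*
 * Top-level handler for IB_USER_VERBS_CMD_GET_CONTEXT: charges the RDMA
 * cgroup, asks the driver for a ucontext, and hands userspace an fd for
 * asynchronous events before publishing file->ucontext under file->mutex.
 */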
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	struct ib_rdmacg_object           cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

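/*
 * The xrcd_tree maps inodes to XRC domains: processes that open the same
 * file (and pass its fd to the open-XRCD command) share a single XRCD.
 * Each table entry pins its inode via igrab() until xrcd_table_delete().
 */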
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

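/*
 * Memory registration: userspace must supply start and hca_va with the
 * same offset within a page, and an on-demand-paging registration is only
 * accepted when the device advertises IB_DEVICE_ON_DEMAND_PAGING.
 */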
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata                udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject                        *uobj;
	struct ib_uverbs_completion_event_file   *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

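/*
 * create_cq() is shared by the legacy and extended create-CQ commands.
 * cmd_sz says how much of struct ib_uverbs_ex_create_cq the caller
 * actually provided, and cb writes the response back in the format the
 * calling entry point expects.
 */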
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object                    *obj;
	struct ib_uverbs_completion_event_file  *ev_file = NULL;
	struct ib_cq                            *cq;
	int                                      ret;
	struct ib_uverbs_ex_create_cq_resp       resp;
	struct ib_cq_init_attr                   attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq   cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq      cmd;
	struct ib_ucq_object              *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

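/*
 * copy_wc_to_user() flattens a kernel struct ib_wc into the fixed-layout
 * uapi struct ib_uverbs_wc; note that the QP number is read back through
 * wc->qp rather than stored directly in the work completion.
 */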
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object               *obj;
	struct ib_device                   *device;
	struct ib_pd                       *pd = NULL;
	struct ib_xrcd                     *xrcd = NULL;
	struct ib_uobject                  *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq                       *scq = NULL, *rcq = NULL;
	struct ib_srq                      *srq = NULL;
	struct ib_qp                       *qp;
	char                               *buf;
	struct ib_qp_init_attr              attr = {};
	struct ib_uverbs_ex_create_qp_resp  resp;
	int                                 ret;
	struct ib_rwq_ind_table            *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

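	/*
	 * The QP now holds its own references on the PD, CQs, SRQ and
	 * indirection table (taken with atomic_inc above), so the read
	 * locks from the lookups have already been dropped.
	 */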
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp    cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata               ucore;
	struct ib_udata               uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int                           err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp      cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	const struct ib_global_route  *grh;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
	resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
	resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
	resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
	resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
				 IB_AH_GRH);
	if (resp.dest.is_global) {
		grh = rdma_ah_read_grh(&attr->ah_attr);
		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
		resp.dest.flow_label = grh->flow_label;
		resp.dest.sgid_index = grh->sgid_index;
		resp.dest.hop_limit = grh->hop_limit;
		resp.dest.traffic_class = grh->traffic_class;
	}
	resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);

	resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
	resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
	resp.alt_dest.static_rate
			= rdma_ah_get_static_rate(&attr->alt_ah_attr);
	resp.alt_dest.is_global
			= !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
			     IB_AH_GRH);
	if (resp.alt_dest.is_global) {
		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
		resp.alt_dest.flow_label = grh->flow_label;
		resp.alt_dest.sgid_index = grh->sgid_index;
		resp.alt_dest.hop_limit = grh->hop_limit;
		resp.alt_dest.traffic_class = grh->traffic_class;
	}
	resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	attr->ah_attr.type = rdma_ah_find_type(qp->device,
					       cmd->base.dest.port_num);
	if (cmd->base.dest.is_global) {
		rdma_ah_set_grh(&attr->ah_attr, NULL,
				cmd->base.dest.flow_label,
				cmd->base.dest.sgid_index,
				cmd->base.dest.hop_limit,
				cmd->base.dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
	}
	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
	rdma_ah_set_port_num(&attr->ah_attr,
			     cmd->base.dest.port_num);

	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
						   cmd->base.alt_dest.port_num);
	if (cmd->base.alt_dest.is_global) {
		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
				cmd->base.alt_dest.flow_label,
				cmd->base.alt_dest.sgid_index,
				cmd->base.alt_dest.hop_limit,
				cmd->base.alt_dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
				     cmd->base.alt_dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
	}

	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
	rdma_ah_set_path_bits(&attr->alt_ah_attr,
			      cmd->base.alt_dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->alt_ah_attr,
				cmd->base.alt_dest.static_rate);
	rdma_ah_set_port_num(&attr->alt_ah_attr,
			     cmd->base.alt_dest.port_num);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

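/*
 * The legacy modify-QP command only accepts attribute-mask bits up to
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK; newer attribute bits are reachable
 * only through the extended command handled below.
 */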
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base), out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_uqp_object		*obj;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
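/*
 * Layout of a post-send command as parsed below (the layout userspace
 * must supply):
 *
 *   struct ib_uverbs_post_send           header, incl. wr_count,
 *                                        sge_count and wqe_size
 *   wr_count * wqe_size bytes            one ib_uverbs_send_wr each
 *   sge_count * sizeof(ib_uverbs_sge)    gather entries, consumed in
 *                                        order by the WRs above
 *
 * alloc_wr() over-allocates each kernel WR so that its sg_list can be
 * placed directly behind it, aligned to struct ib_sge; the guard in
 * alloc_wr() rejects num_sge values that would overflow that sum.
 */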
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;
	size_t                          next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
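/*
 * On failure, resp.bad_wr above carries the 1-based position of the
 * work request the driver rejected, so userspace can tell how far
 * posting got.  The receive-side helper below rebuilds a user WR chain
 * the same way post_send does, minus the opcode-specific unions: a run
 * of wr_count fixed-size WRs followed by the flat scatter/gather
 * array.
 */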
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
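/*
 * ib_uverbs_create_ah() builds a kernel rdma_ah_attr from the user's
 * description.  GRH fields are filled in only when the caller marked
 * the address as global; the destination MAC, present only for
 * RoCE-type attributes, is cleared rather than taken from userspace.
 */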
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct rdma_ah_attr		 attr;
	int ret;
	struct ib_udata			 udata;
	u8				*dmac;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}
	dmac = rdma_ah_retrieve_dmac(&attr);
	if (dmac)
		memset(dmac, 0, ETH_ALEN);

	ah = pd->device->create_ah(pd, &attr, &udata);

	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->device  = pd->device;
	ah->pd      = pd;
	atomic_inc(&pd->usecnt);
	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
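/*
 * Multicast attach/detach keep a per-QP list of (gid, lid) pairs under
 * obj->mcast_lock.  The list makes a repeated attach of the same group
 * a no-op and lets the uverbs cleanup path detach any groups userspace
 * left attached when the QP is destroyed.
 */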
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;
	bool                          found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
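/*
 * Flow-steering specs arrive as a variable-length array of
 * ib_uverbs_flow_spec entries.  kern_spec_to_ib_spec() further below
 * dispatches on the type: values at or above IB_FLOW_SPEC_ACTION_TAG
 * are actions (tag, drop) with fixed sizes; everything below that is a
 * packet filter carrying a value/mask pair.
 */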
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}
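/*
 * A filter spec is laid out as:
 *
 *   struct ib_uverbs_flow_spec_hdr
 *   filter value block   (kern_filter_sz bytes)
 *   filter mask block    (kern_filter_sz bytes)
 *
 * hence the (size - sizeof(hdr)) / 2 in kern_spec_filter_sz().  When
 * userspace was built against newer headers its filter may be larger
 * than what this kernel knows about (ib_real_filter_sz);
 * spec_filter_size() accepts that only if every extra byte is zero,
 * i.e. the unknown fields are unused.
 */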
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
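/*
 * The extended (ex_) commands below share one ABI pattern: the minimum
 * command size is computed with offsetof()+sizeof() of the last field
 * this kernel requires, longer input is accepted only when the unknown
 * trailing bytes are zero (ib_is_udata_cleared), and the response
 * carries response_length so older userspace can be handed a shorter,
 * still-valid reply.  A newer library may thus send a larger struct to
 * an older kernel, as long as the extra fields are unset.
 */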
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq	   cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object		  *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
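/*
 * As with destroy_qp above, uobj_remove_commit() drops the uobject's
 * last reference, so an extra reference is taken across the commit
 * while the handler still reads events_reported out of it.
 */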
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq	    cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject		   *uobj;
	struct ib_uwq_object		   *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}
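/*
 * An RSS indirection table holds 2^log_ind_tbl_size WQ handles.  The
 * handle array in the command is u64-aligned, so a table with a single
 * entry still carries one extra __u32 of padding.  Every handle is
 * resolved to its WQ, and each WQ's usecnt is held for the lifetime of
 * the table.
 */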
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table      cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject		 *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject	*uobj;
	int			 ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
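/*
 * Creating a steering rule parses a variable-length array of flow
 * specs placed directly after struct ib_uverbs_create_flow.  The
 * caller needs CAP_NET_RAW, and the loop below must consume exactly
 * cmd.flow_attr.size bytes across cmd.flow_attr.num_of_specs specs,
 * otherwise the command is rejected.
 */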
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_uverbs_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
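/*
 * __uverbs_create_xsrq() backs both SRQ creation commands.  For an XRC
 * SRQ it additionally resolves the XRCD and the XRC completion queue
 * and holds references on them (and on the owning uxrcd object) until
 * the SRQ is destroyed.
 */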
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						    file->ucontext);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		uobj_put_read(xrcd_uobj);
		uobj_put_obj_read(attr.ext.xrc.cq);
	}
	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_obj_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
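/*
 * The legacy create_srq command is translated into an
 * ib_uverbs_create_xsrq with srq_type IB_SRQT_BASIC and handed to the
 * shared helper above.
 */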
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
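/*
 * Extended query_device builds its response incrementally: each
 * optional capability block is appended, and response_length advanced,
 * only while the caller's output buffer still has room for it, so an
 * older userspace simply receives the prefix it asked for.
 */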
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}