/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
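
/*
 * GET_CONTEXT: sets up the per-process ib_ucontext and the async event
 * fd.  A uverbs file may own at most one ucontext, which is why a
 * second call fails with -EINVAL below.
 */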
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
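
/*
 * QUERY_DEVICE: the legacy query command; it answers purely from the
 * attributes cached in ib_dev->attrs, with no driver round trip.
 */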
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
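
/*
 * DEALLOC_PD: uobj_remove_commit() drops the handle and invokes the
 * type's cleanup; any failure (e.g. the PD still has users) is
 * returned to userspace.
 */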
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
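
/*
 * OPEN_XRCD: an XRC domain can be shared across processes through a
 * file descriptor.  The inode behind cmd.fd keys into the per-device
 * rb-tree above, so everyone opening the same file sees the same
 * ib_xrcd.
 */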
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no such XRCD for this inode; need the CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
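
/*
 * REG_MR: registers a user memory region.  start and hca_va only need
 * to agree on the offset within a page; the driver's reg_user_mr hook
 * performs the actual pinning and mapping.
 */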
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
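
/* DEREG_MR: like DEALLOC_PD, teardown happens in uobj_remove_commit(). */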
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
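
/*
 * Common CQ creation path shared by the legacy write() command and the
 * extended command: cmd_sz says how much of the extended struct the
 * caller supplied, and cb writes the response in the matching ABI
 * layout.
 */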
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
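
/* Legacy CREATE_CQ: repack the old ABI into the extended command. */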
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
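
/*
 * RESIZE_CQ: note that only resp.cqe is copied back to userspace, not
 * the whole response structure.
 */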
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
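
/*
 * DESTROY_CQ: an extra reference (uverbs_uobject_get below) keeps the
 * uobject alive across uobj_remove_commit(), since the event counts
 * for the response live in that memory.
 */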
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
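		/* Every QP type but XRC_TGT needs a PD; an SQ also needs a send CQ. */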
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);
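
	/* Success: commit the uobject so the new handle becomes visible. */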
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	const struct ib_global_route *grh;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
	resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
	resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
	resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
	resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
				 IB_AH_GRH);
	if (resp.dest.is_global) {
		grh = rdma_ah_read_grh(&attr->ah_attr);
		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
		resp.dest.flow_label = grh->flow_label;
		resp.dest.sgid_index = grh->sgid_index;
		resp.dest.hop_limit = grh->hop_limit;
		resp.dest.traffic_class = grh->traffic_class;
	}
	resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);

	resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
	resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
	resp.alt_dest.static_rate
		= rdma_ah_get_static_rate(&attr->alt_ah_attr);
	resp.alt_dest.is_global
		= !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
		     IB_AH_GRH);
	if (resp.alt_dest.is_global) {
		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
		resp.alt_dest.flow_label = grh->flow_label;
		resp.alt_dest.sgid_index = grh->sgid_index;
		resp.alt_dest.hop_limit = grh->hop_limit;
		resp.alt_dest.traffic_class = grh->traffic_class;
	}
	resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		attr->ah_attr.type = rdma_ah_find_type(qp->device,
						       cmd->base.dest.port_num);
	if (cmd->base.dest.is_global) {
		rdma_ah_set_grh(&attr->ah_attr, NULL,
				cmd->base.dest.flow_label,
				cmd->base.dest.sgid_index,
				cmd->base.dest.hop_limit,
				cmd->base.dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
	}
	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
	rdma_ah_set_port_num(&attr->ah_attr,
			     cmd->base.dest.port_num);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		attr->alt_ah_attr.type =
			rdma_ah_find_type(qp->device,
					  cmd->base.alt_dest.port_num);
	if (cmd->base.alt_dest.is_global) {
		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
				cmd->base.alt_dest.flow_label,
				cmd->base.alt_dest.sgid_index,
				cmd->base.alt_dest.hop_limit,
				cmd->base.alt_dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
				     cmd->base.alt_dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
	}

	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
	rdma_ah_set_path_bits(&attr->alt_ah_attr,
			      cmd->base.alt_dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->alt_ah_attr,
				cmd->base.alt_dest.static_rate);
	rdma_ah_set_port_num(&attr->alt_ah_attr,
			     cmd->base.alt_dest.port_num);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base), out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
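
/*
 * DESTROY_QP: same keep-alive trick as DESTROY_CQ above, so that
 * events_reported can still be read after remove_commit.
 */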
2110 */ 2111 uverbs_uobject_get(uobj); 2112 2113 ret = uobj_remove_commit(uobj); 2114 if (ret) { 2115 uverbs_uobject_put(uobj); 2116 return ret; 2117 } 2118 2119 resp.events_reported = obj->uevent.events_reported; 2120 uverbs_uobject_put(uobj); 2121 2122 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2123 &resp, sizeof resp)) 2124 return -EFAULT; 2125 2126 return in_len; 2127 } 2128 2129 static void *alloc_wr(size_t wr_size, __u32 num_sge) 2130 { 2131 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) / 2132 sizeof (struct ib_sge)) 2133 return NULL; 2134 2135 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2136 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2137 } 2138 2139 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2140 struct ib_device *ib_dev, 2141 const char __user *buf, int in_len, 2142 int out_len) 2143 { 2144 struct ib_uverbs_post_send cmd; 2145 struct ib_uverbs_post_send_resp resp; 2146 struct ib_uverbs_send_wr *user_wr; 2147 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2148 struct ib_qp *qp; 2149 int i, sg_ind; 2150 int is_ud; 2151 ssize_t ret = -EINVAL; 2152 size_t next_size; 2153 2154 if (copy_from_user(&cmd, buf, sizeof cmd)) 2155 return -EFAULT; 2156 2157 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2158 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 2159 return -EINVAL; 2160 2161 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2162 return -EINVAL; 2163 2164 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2165 if (!user_wr) 2166 return -ENOMEM; 2167 2168 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext); 2169 if (!qp) 2170 goto out; 2171 2172 is_ud = qp->qp_type == IB_QPT_UD; 2173 sg_ind = 0; 2174 last = NULL; 2175 for (i = 0; i < cmd.wr_count; ++i) { 2176 if (copy_from_user(user_wr, 2177 buf + sizeof cmd + i * cmd.wqe_size, 2178 cmd.wqe_size)) { 2179 ret = -EFAULT; 2180 goto out_put; 2181 } 2182 2183 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2184 ret = -EINVAL; 2185 goto out_put; 2186 } 2187 2188 if (is_ud) { 2189 struct ib_ud_wr *ud; 2190 2191 if (user_wr->opcode != IB_WR_SEND && 2192 user_wr->opcode != IB_WR_SEND_WITH_IMM) { 2193 ret = -EINVAL; 2194 goto out_put; 2195 } 2196 2197 next_size = sizeof(*ud); 2198 ud = alloc_wr(next_size, user_wr->num_sge); 2199 if (!ud) { 2200 ret = -ENOMEM; 2201 goto out_put; 2202 } 2203 2204 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah, 2205 file->ucontext); 2206 if (!ud->ah) { 2207 kfree(ud); 2208 ret = -EINVAL; 2209 goto out_put; 2210 } 2211 ud->remote_qpn = user_wr->wr.ud.remote_qpn; 2212 ud->remote_qkey = user_wr->wr.ud.remote_qkey; 2213 2214 next = &ud->wr; 2215 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || 2216 user_wr->opcode == IB_WR_RDMA_WRITE || 2217 user_wr->opcode == IB_WR_RDMA_READ) { 2218 struct ib_rdma_wr *rdma; 2219 2220 next_size = sizeof(*rdma); 2221 rdma = alloc_wr(next_size, user_wr->num_sge); 2222 if (!rdma) { 2223 ret = -ENOMEM; 2224 goto out_put; 2225 } 2226 2227 rdma->remote_addr = user_wr->wr.rdma.remote_addr; 2228 rdma->rkey = user_wr->wr.rdma.rkey; 2229 2230 next = &rdma->wr; 2231 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2232 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2233 struct ib_atomic_wr *atomic; 2234 2235 next_size = sizeof(*atomic); 2236 atomic = alloc_wr(next_size, user_wr->num_sge); 2237 if (!atomic) { 2238 ret = -ENOMEM; 2239 goto out_put; 2240 } 2241 2242 atomic->remote_addr = user_wr->wr.atomic.remote_addr; 2243 atomic->compare_add = user_wr->wr.atomic.compare_add; 2244 
atomic->swap = user_wr->wr.atomic.swap; 2245 atomic->rkey = user_wr->wr.atomic.rkey; 2246 2247 next = &atomic->wr; 2248 } else if (user_wr->opcode == IB_WR_SEND || 2249 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2250 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2251 next_size = sizeof(*next); 2252 next = alloc_wr(next_size, user_wr->num_sge); 2253 if (!next) { 2254 ret = -ENOMEM; 2255 goto out_put; 2256 } 2257 } else { 2258 ret = -EINVAL; 2259 goto out_put; 2260 } 2261 2262 if (user_wr->opcode == IB_WR_SEND_WITH_IMM || 2263 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 2264 next->ex.imm_data = 2265 (__be32 __force) user_wr->ex.imm_data; 2266 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { 2267 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; 2268 } 2269 2270 if (!last) 2271 wr = next; 2272 else 2273 last->next = next; 2274 last = next; 2275 2276 next->next = NULL; 2277 next->wr_id = user_wr->wr_id; 2278 next->num_sge = user_wr->num_sge; 2279 next->opcode = user_wr->opcode; 2280 next->send_flags = user_wr->send_flags; 2281 2282 if (next->num_sge) { 2283 next->sg_list = (void *) next + 2284 ALIGN(next_size, sizeof(struct ib_sge)); 2285 if (copy_from_user(next->sg_list, 2286 buf + sizeof cmd + 2287 cmd.wr_count * cmd.wqe_size + 2288 sg_ind * sizeof (struct ib_sge), 2289 next->num_sge * sizeof (struct ib_sge))) { 2290 ret = -EFAULT; 2291 goto out_put; 2292 } 2293 sg_ind += next->num_sge; 2294 } else 2295 next->sg_list = NULL; 2296 } 2297 2298 resp.bad_wr = 0; 2299 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2300 if (ret) 2301 for (next = wr; next; next = next->next) { 2302 ++resp.bad_wr; 2303 if (next == bad_wr) 2304 break; 2305 } 2306 2307 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2308 &resp, sizeof resp)) 2309 ret = -EFAULT; 2310 2311 out_put: 2312 uobj_put_obj_read(qp); 2313 2314 while (wr) { 2315 if (is_ud && ud_wr(wr)->ah) 2316 uobj_put_obj_read(ud_wr(wr)->ah); 2317 next = wr->next; 2318 kfree(wr); 2319 wr = next; 2320 } 2321 2322 out: 2323 kfree(user_wr); 2324 2325 return ret ? 
ret : in_len; 2326 } 2327 2328 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2329 int in_len, 2330 u32 wr_count, 2331 u32 sge_count, 2332 u32 wqe_size) 2333 { 2334 struct ib_uverbs_recv_wr *user_wr; 2335 struct ib_recv_wr *wr = NULL, *last, *next; 2336 int sg_ind; 2337 int i; 2338 int ret; 2339 2340 if (in_len < wqe_size * wr_count + 2341 sge_count * sizeof (struct ib_uverbs_sge)) 2342 return ERR_PTR(-EINVAL); 2343 2344 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2345 return ERR_PTR(-EINVAL); 2346 2347 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2348 if (!user_wr) 2349 return ERR_PTR(-ENOMEM); 2350 2351 sg_ind = 0; 2352 last = NULL; 2353 for (i = 0; i < wr_count; ++i) { 2354 if (copy_from_user(user_wr, buf + i * wqe_size, 2355 wqe_size)) { 2356 ret = -EFAULT; 2357 goto err; 2358 } 2359 2360 if (user_wr->num_sge + sg_ind > sge_count) { 2361 ret = -EINVAL; 2362 goto err; 2363 } 2364 2365 if (user_wr->num_sge >= 2366 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) / 2367 sizeof (struct ib_sge)) { 2368 ret = -EINVAL; 2369 goto err; 2370 } 2371 2372 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2373 user_wr->num_sge * sizeof (struct ib_sge), 2374 GFP_KERNEL); 2375 if (!next) { 2376 ret = -ENOMEM; 2377 goto err; 2378 } 2379 2380 if (!last) 2381 wr = next; 2382 else 2383 last->next = next; 2384 last = next; 2385 2386 next->next = NULL; 2387 next->wr_id = user_wr->wr_id; 2388 next->num_sge = user_wr->num_sge; 2389 2390 if (next->num_sge) { 2391 next->sg_list = (void *) next + 2392 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2393 if (copy_from_user(next->sg_list, 2394 buf + wr_count * wqe_size + 2395 sg_ind * sizeof (struct ib_sge), 2396 next->num_sge * sizeof (struct ib_sge))) { 2397 ret = -EFAULT; 2398 goto err; 2399 } 2400 sg_ind += next->num_sge; 2401 } else 2402 next->sg_list = NULL; 2403 } 2404 2405 kfree(user_wr); 2406 return wr; 2407 2408 err: 2409 kfree(user_wr); 2410 2411 while (wr) { 2412 next = wr->next; 2413 kfree(wr); 2414 wr = next; 2415 } 2416 2417 return ERR_PTR(ret); 2418 } 2419 2420 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2421 struct ib_device *ib_dev, 2422 const char __user *buf, int in_len, 2423 int out_len) 2424 { 2425 struct ib_uverbs_post_recv cmd; 2426 struct ib_uverbs_post_recv_resp resp; 2427 struct ib_recv_wr *wr, *next, *bad_wr; 2428 struct ib_qp *qp; 2429 ssize_t ret = -EINVAL; 2430 2431 if (copy_from_user(&cmd, buf, sizeof cmd)) 2432 return -EFAULT; 2433 2434 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2435 in_len - sizeof cmd, cmd.wr_count, 2436 cmd.sge_count, cmd.wqe_size); 2437 if (IS_ERR(wr)) 2438 return PTR_ERR(wr); 2439 2440 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext); 2441 if (!qp) 2442 goto out; 2443 2444 resp.bad_wr = 0; 2445 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2446 2447 uobj_put_obj_read(qp); 2448 if (ret) { 2449 for (next = wr; next; next = next->next) { 2450 ++resp.bad_wr; 2451 if (next == bad_wr) 2452 break; 2453 } 2454 } 2455 2456 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2457 &resp, sizeof resp)) 2458 ret = -EFAULT; 2459 2460 out: 2461 while (wr) { 2462 next = wr->next; 2463 kfree(wr); 2464 wr = next; 2465 } 2466 2467 return ret ? 
ret : in_len; 2468 } 2469 2470 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2471 struct ib_device *ib_dev, 2472 const char __user *buf, int in_len, 2473 int out_len) 2474 { 2475 struct ib_uverbs_post_srq_recv cmd; 2476 struct ib_uverbs_post_srq_recv_resp resp; 2477 struct ib_recv_wr *wr, *next, *bad_wr; 2478 struct ib_srq *srq; 2479 ssize_t ret = -EINVAL; 2480 2481 if (copy_from_user(&cmd, buf, sizeof cmd)) 2482 return -EFAULT; 2483 2484 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2485 in_len - sizeof cmd, cmd.wr_count, 2486 cmd.sge_count, cmd.wqe_size); 2487 if (IS_ERR(wr)) 2488 return PTR_ERR(wr); 2489 2490 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext); 2491 if (!srq) 2492 goto out; 2493 2494 resp.bad_wr = 0; 2495 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2496 2497 uobj_put_obj_read(srq); 2498 2499 if (ret) 2500 for (next = wr; next; next = next->next) { 2501 ++resp.bad_wr; 2502 if (next == bad_wr) 2503 break; 2504 } 2505 2506 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2507 &resp, sizeof resp)) 2508 ret = -EFAULT; 2509 2510 out: 2511 while (wr) { 2512 next = wr->next; 2513 kfree(wr); 2514 wr = next; 2515 } 2516 2517 return ret ? ret : in_len; 2518 } 2519 2520 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 2521 struct ib_device *ib_dev, 2522 const char __user *buf, int in_len, 2523 int out_len) 2524 { 2525 struct ib_uverbs_create_ah cmd; 2526 struct ib_uverbs_create_ah_resp resp; 2527 struct ib_uobject *uobj; 2528 struct ib_pd *pd; 2529 struct ib_ah *ah; 2530 struct rdma_ah_attr attr; 2531 int ret; 2532 struct ib_udata udata; 2533 u8 *dmac; 2534 2535 if (out_len < sizeof resp) 2536 return -ENOSPC; 2537 2538 if (copy_from_user(&cmd, buf, sizeof cmd)) 2539 return -EFAULT; 2540 2541 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) 2542 return -EINVAL; 2543 2544 INIT_UDATA(&udata, buf + sizeof(cmd), 2545 (unsigned long)cmd.response + sizeof(resp), 2546 in_len - sizeof(cmd), out_len - sizeof(resp)); 2547 2548 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext); 2549 if (IS_ERR(uobj)) 2550 return PTR_ERR(uobj); 2551 2552 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext); 2553 if (!pd) { 2554 ret = -EINVAL; 2555 goto err; 2556 } 2557 2558 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num); 2559 rdma_ah_set_dlid(&attr, cmd.attr.dlid); 2560 rdma_ah_set_sl(&attr, cmd.attr.sl); 2561 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits); 2562 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate); 2563 rdma_ah_set_port_num(&attr, cmd.attr.port_num); 2564 2565 if (cmd.attr.is_global) { 2566 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label, 2567 cmd.attr.grh.sgid_index, 2568 cmd.attr.grh.hop_limit, 2569 cmd.attr.grh.traffic_class); 2570 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid); 2571 } else { 2572 rdma_ah_set_ah_flags(&attr, 0); 2573 } 2574 dmac = rdma_ah_retrieve_dmac(&attr); 2575 if (dmac) 2576 memset(dmac, 0, ETH_ALEN); 2577 2578 ah = pd->device->create_ah(pd, &attr, &udata); 2579 2580 if (IS_ERR(ah)) { 2581 ret = PTR_ERR(ah); 2582 goto err_put; 2583 } 2584 2585 ah->device = pd->device; 2586 ah->pd = pd; 2587 atomic_inc(&pd->usecnt); 2588 ah->uobject = uobj; 2589 uobj->user_handle = cmd.user_handle; 2590 uobj->object = ah; 2591 2592 resp.ah_handle = uobj->id; 2593 2594 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2595 &resp, sizeof resp)) { 2596 ret = -EFAULT; 2597 goto err_copy; 2598 } 2599 2600 uobj_put_obj_read(pd); 2601 uobj_alloc_commit(uobj); 2602 2603 return in_len; 2604 2605 
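	/*
	 * Error unwind: undo in reverse order of acquisition - destroy the
	 * AH, drop the PD reference, then abort the half-built uobject.
	 */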
err_copy: 2606 rdma_destroy_ah(ah); 2607 2608 err_put: 2609 uobj_put_obj_read(pd); 2610 2611 err: 2612 uobj_alloc_abort(uobj); 2613 return ret; 2614 } 2615 2616 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 2617 struct ib_device *ib_dev, 2618 const char __user *buf, int in_len, int out_len) 2619 { 2620 struct ib_uverbs_destroy_ah cmd; 2621 struct ib_uobject *uobj; 2622 int ret; 2623 2624 if (copy_from_user(&cmd, buf, sizeof cmd)) 2625 return -EFAULT; 2626 2627 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle, 2628 file->ucontext); 2629 if (IS_ERR(uobj)) 2630 return PTR_ERR(uobj); 2631 2632 ret = uobj_remove_commit(uobj); 2633 return ret ?: in_len; 2634 } 2635 2636 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 2637 struct ib_device *ib_dev, 2638 const char __user *buf, int in_len, 2639 int out_len) 2640 { 2641 struct ib_uverbs_attach_mcast cmd; 2642 struct ib_qp *qp; 2643 struct ib_uqp_object *obj; 2644 struct ib_uverbs_mcast_entry *mcast; 2645 int ret; 2646 2647 if (copy_from_user(&cmd, buf, sizeof cmd)) 2648 return -EFAULT; 2649 2650 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext); 2651 if (!qp) 2652 return -EINVAL; 2653 2654 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2655 2656 mutex_lock(&obj->mcast_lock); 2657 list_for_each_entry(mcast, &obj->mcast_list, list) 2658 if (cmd.mlid == mcast->lid && 2659 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2660 ret = 0; 2661 goto out_put; 2662 } 2663 2664 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 2665 if (!mcast) { 2666 ret = -ENOMEM; 2667 goto out_put; 2668 } 2669 2670 mcast->lid = cmd.mlid; 2671 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 2672 2673 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 2674 if (!ret) 2675 list_add_tail(&mcast->list, &obj->mcast_list); 2676 else 2677 kfree(mcast); 2678 2679 out_put: 2680 mutex_unlock(&obj->mcast_lock); 2681 uobj_put_obj_read(qp); 2682 2683 return ret ? ret : in_len; 2684 } 2685 2686 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 2687 struct ib_device *ib_dev, 2688 const char __user *buf, int in_len, 2689 int out_len) 2690 { 2691 struct ib_uverbs_detach_mcast cmd; 2692 struct ib_uqp_object *obj; 2693 struct ib_qp *qp; 2694 struct ib_uverbs_mcast_entry *mcast; 2695 int ret = -EINVAL; 2696 bool found = false; 2697 2698 if (copy_from_user(&cmd, buf, sizeof cmd)) 2699 return -EFAULT; 2700 2701 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext); 2702 if (!qp) 2703 return -EINVAL; 2704 2705 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2706 mutex_lock(&obj->mcast_lock); 2707 2708 list_for_each_entry(mcast, &obj->mcast_list, list) 2709 if (cmd.mlid == mcast->lid && 2710 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2711 list_del(&mcast->list); 2712 kfree(mcast); 2713 found = true; 2714 break; 2715 } 2716 2717 if (!found) { 2718 ret = -EINVAL; 2719 goto out_put; 2720 } 2721 2722 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid); 2723 2724 out_put: 2725 mutex_unlock(&obj->mcast_lock); 2726 uobj_put_obj_read(qp); 2727 return ret ? 
ret : in_len; 2728 } 2729 2730 static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec, 2731 union ib_flow_spec *ib_spec) 2732 { 2733 ib_spec->type = kern_spec->type; 2734 switch (ib_spec->type) { 2735 case IB_FLOW_SPEC_ACTION_TAG: 2736 if (kern_spec->flow_tag.size != 2737 sizeof(struct ib_uverbs_flow_spec_action_tag)) 2738 return -EINVAL; 2739 2740 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag); 2741 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id; 2742 break; 2743 case IB_FLOW_SPEC_ACTION_DROP: 2744 if (kern_spec->drop.size != 2745 sizeof(struct ib_uverbs_flow_spec_action_drop)) 2746 return -EINVAL; 2747 2748 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop); 2749 break; 2750 default: 2751 return -EINVAL; 2752 } 2753 return 0; 2754 } 2755 2756 static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec) 2757 { 2758 /* Returns user space filter size, includes padding */ 2759 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2; 2760 } 2761 2762 static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size, 2763 u16 ib_real_filter_sz) 2764 { 2765 /* 2766 * User space filter structures must be 64 bit aligned, otherwise this 2767 * may pass, but we won't handle additional new attributes. 2768 */ 2769 2770 if (kern_filter_size > ib_real_filter_sz) { 2771 if (memchr_inv(kern_spec_filter + 2772 ib_real_filter_sz, 0, 2773 kern_filter_size - ib_real_filter_sz)) 2774 return -EINVAL; 2775 return ib_real_filter_sz; 2776 } 2777 return kern_filter_size; 2778 } 2779 2780 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, 2781 union ib_flow_spec *ib_spec) 2782 { 2783 ssize_t actual_filter_sz; 2784 ssize_t kern_filter_sz; 2785 ssize_t ib_filter_sz; 2786 void *kern_spec_mask; 2787 void *kern_spec_val; 2788 2789 if (kern_spec->reserved) 2790 return -EINVAL; 2791 2792 ib_spec->type = kern_spec->type; 2793 2794 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); 2795 /* User flow spec size must be aligned to 4 bytes */ 2796 if (kern_filter_sz != ALIGN(kern_filter_sz, 4)) 2797 return -EINVAL; 2798 2799 kern_spec_val = (void *)kern_spec + 2800 sizeof(struct ib_uverbs_flow_spec_hdr); 2801 kern_spec_mask = kern_spec_val + kern_filter_sz; 2802 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL)) 2803 return -EINVAL; 2804 2805 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2806 case IB_FLOW_SPEC_ETH: 2807 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); 2808 actual_filter_sz = spec_filter_size(kern_spec_mask, 2809 kern_filter_sz, 2810 ib_filter_sz); 2811 if (actual_filter_sz <= 0) 2812 return -EINVAL; 2813 ib_spec->size = sizeof(struct ib_flow_spec_eth); 2814 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz); 2815 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz); 2816 break; 2817 case IB_FLOW_SPEC_IPV4: 2818 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz); 2819 actual_filter_sz = spec_filter_size(kern_spec_mask, 2820 kern_filter_sz, 2821 ib_filter_sz); 2822 if (actual_filter_sz <= 0) 2823 return -EINVAL; 2824 ib_spec->size = sizeof(struct ib_flow_spec_ipv4); 2825 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz); 2826 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz); 2827 break; 2828 case IB_FLOW_SPEC_IPV6: 2829 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz); 2830 actual_filter_sz = spec_filter_size(kern_spec_mask, 2831 kern_filter_sz, 2832 ib_filter_sz); 2833 if (actual_filter_sz <= 0) 2834 
return -EINVAL; 2835 ib_spec->size = sizeof(struct ib_flow_spec_ipv6); 2836 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz); 2837 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz); 2838 2839 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) || 2840 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20)) 2841 return -EINVAL; 2842 break; 2843 case IB_FLOW_SPEC_TCP: 2844 case IB_FLOW_SPEC_UDP: 2845 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz); 2846 actual_filter_sz = spec_filter_size(kern_spec_mask, 2847 kern_filter_sz, 2848 ib_filter_sz); 2849 if (actual_filter_sz <= 0) 2850 return -EINVAL; 2851 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp); 2852 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz); 2853 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); 2854 break; 2855 case IB_FLOW_SPEC_VXLAN_TUNNEL: 2856 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz); 2857 actual_filter_sz = spec_filter_size(kern_spec_mask, 2858 kern_filter_sz, 2859 ib_filter_sz); 2860 if (actual_filter_sz <= 0) 2861 return -EINVAL; 2862 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel); 2863 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz); 2864 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz); 2865 2866 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) || 2867 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24)) 2868 return -EINVAL; 2869 break; 2870 default: 2871 return -EINVAL; 2872 } 2873 return 0; 2874 } 2875 2876 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 2877 union ib_flow_spec *ib_spec) 2878 { 2879 if (kern_spec->reserved) 2880 return -EINVAL; 2881 2882 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG) 2883 return kern_spec_to_ib_spec_action(kern_spec, ib_spec); 2884 else 2885 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec); 2886 } 2887 2888 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, 2889 struct ib_device *ib_dev, 2890 struct ib_udata *ucore, 2891 struct ib_udata *uhw) 2892 { 2893 struct ib_uverbs_ex_create_wq cmd = {}; 2894 struct ib_uverbs_ex_create_wq_resp resp = {}; 2895 struct ib_uwq_object *obj; 2896 int err = 0; 2897 struct ib_cq *cq; 2898 struct ib_pd *pd; 2899 struct ib_wq *wq; 2900 struct ib_wq_init_attr wq_init_attr = {}; 2901 size_t required_cmd_sz; 2902 size_t required_resp_len; 2903 2904 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); 2905 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); 2906 2907 if (ucore->inlen < required_cmd_sz) 2908 return -EINVAL; 2909 2910 if (ucore->outlen < required_resp_len) 2911 return -ENOSPC; 2912 2913 if (ucore->inlen > sizeof(cmd) && 2914 !ib_is_udata_cleared(ucore, sizeof(cmd), 2915 ucore->inlen - sizeof(cmd))) 2916 return -EOPNOTSUPP; 2917 2918 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 2919 if (err) 2920 return err; 2921 2922 if (cmd.comp_mask) 2923 return -EOPNOTSUPP; 2924 2925 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq), 2926 file->ucontext); 2927 if (IS_ERR(obj)) 2928 return PTR_ERR(obj); 2929 2930 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext); 2931 if (!pd) { 2932 err = -EINVAL; 2933 goto err_uobj; 2934 } 2935 2936 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext); 2937 if (!cq) { 2938 err = -EINVAL; 2939 goto err_put_pd; 2940 } 2941 2942 wq_init_attr.cq = cq; 2943 wq_init_attr.max_sge = cmd.max_sge; 2944 wq_init_attr.max_wr = cmd.max_wr; 2945 wq_init_attr.wq_context = file; 2946 
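	/* Async WQ events are routed back to this process via the uverbs file. */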
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
3044 */ 3045 uverbs_uobject_get(uobj); 3046 3047 ret = uobj_remove_commit(uobj); 3048 resp.events_reported = obj->uevent.events_reported; 3049 uverbs_uobject_put(uobj); 3050 if (ret) 3051 return ret; 3052 3053 return ib_copy_to_udata(ucore, &resp, resp.response_length); 3054 } 3055 3056 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, 3057 struct ib_device *ib_dev, 3058 struct ib_udata *ucore, 3059 struct ib_udata *uhw) 3060 { 3061 struct ib_uverbs_ex_modify_wq cmd = {}; 3062 struct ib_wq *wq; 3063 struct ib_wq_attr wq_attr = {}; 3064 size_t required_cmd_sz; 3065 int ret; 3066 3067 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); 3068 if (ucore->inlen < required_cmd_sz) 3069 return -EINVAL; 3070 3071 if (ucore->inlen > sizeof(cmd) && 3072 !ib_is_udata_cleared(ucore, sizeof(cmd), 3073 ucore->inlen - sizeof(cmd))) 3074 return -EOPNOTSUPP; 3075 3076 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3077 if (ret) 3078 return ret; 3079 3080 if (!cmd.attr_mask) 3081 return -EINVAL; 3082 3083 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS)) 3084 return -EINVAL; 3085 3086 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext); 3087 if (!wq) 3088 return -EINVAL; 3089 3090 wq_attr.curr_wq_state = cmd.curr_wq_state; 3091 wq_attr.wq_state = cmd.wq_state; 3092 if (cmd.attr_mask & IB_WQ_FLAGS) { 3093 wq_attr.flags = cmd.flags; 3094 wq_attr.flags_mask = cmd.flags_mask; 3095 } 3096 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3097 uobj_put_obj_read(wq); 3098 return ret; 3099 } 3100 3101 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, 3102 struct ib_device *ib_dev, 3103 struct ib_udata *ucore, 3104 struct ib_udata *uhw) 3105 { 3106 struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; 3107 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; 3108 struct ib_uobject *uobj; 3109 int err = 0; 3110 struct ib_rwq_ind_table_init_attr init_attr = {}; 3111 struct ib_rwq_ind_table *rwq_ind_tbl; 3112 struct ib_wq **wqs = NULL; 3113 u32 *wqs_handles = NULL; 3114 struct ib_wq *wq = NULL; 3115 int i, j, num_read_wqs; 3116 u32 num_wq_handles; 3117 u32 expected_in_size; 3118 size_t required_cmd_sz_header; 3119 size_t required_resp_len; 3120 3121 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); 3122 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); 3123 3124 if (ucore->inlen < required_cmd_sz_header) 3125 return -EINVAL; 3126 3127 if (ucore->outlen < required_resp_len) 3128 return -ENOSPC; 3129 3130 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); 3131 if (err) 3132 return err; 3133 3134 ucore->inbuf += required_cmd_sz_header; 3135 ucore->inlen -= required_cmd_sz_header; 3136 3137 if (cmd.comp_mask) 3138 return -EOPNOTSUPP; 3139 3140 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) 3141 return -EINVAL; 3142 3143 num_wq_handles = 1 << cmd.log_ind_tbl_size; 3144 expected_in_size = num_wq_handles * sizeof(__u32); 3145 if (num_wq_handles == 1) 3146 /* input size for wq handles is u64 aligned */ 3147 expected_in_size += sizeof(__u32); 3148 3149 if (ucore->inlen < expected_in_size) 3150 return -EINVAL; 3151 3152 if (ucore->inlen > expected_in_size && 3153 !ib_is_udata_cleared(ucore, expected_in_size, 3154 ucore->inlen - expected_in_size)) 3155 return -EOPNOTSUPP; 3156 3157 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), 3158 GFP_KERNEL); 3159 if (!wqs_handles) 3160 return -ENOMEM; 3161 
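	/*
	 * The WQ handle array follows the fixed-size header, which was
	 * already consumed by advancing ucore->inbuf above.
	 */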
3162 err = ib_copy_from_udata(wqs_handles, ucore, 3163 num_wq_handles * sizeof(__u32)); 3164 if (err) 3165 goto err_free; 3166 3167 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); 3168 if (!wqs) { 3169 err = -ENOMEM; 3170 goto err_free; 3171 } 3172 3173 for (num_read_wqs = 0; num_read_wqs < num_wq_handles; 3174 num_read_wqs++) { 3175 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs], 3176 file->ucontext); 3177 if (!wq) { 3178 err = -EINVAL; 3179 goto put_wqs; 3180 } 3181 3182 wqs[num_read_wqs] = wq; 3183 } 3184 3185 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext); 3186 if (IS_ERR(uobj)) { 3187 err = PTR_ERR(uobj); 3188 goto put_wqs; 3189 } 3190 3191 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3192 init_attr.ind_tbl = wqs; 3193 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3194 3195 if (IS_ERR(rwq_ind_tbl)) { 3196 err = PTR_ERR(rwq_ind_tbl); 3197 goto err_uobj; 3198 } 3199 3200 rwq_ind_tbl->ind_tbl = wqs; 3201 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; 3202 rwq_ind_tbl->uobject = uobj; 3203 uobj->object = rwq_ind_tbl; 3204 rwq_ind_tbl->device = ib_dev; 3205 atomic_set(&rwq_ind_tbl->usecnt, 0); 3206 3207 for (i = 0; i < num_wq_handles; i++) 3208 atomic_inc(&wqs[i]->usecnt); 3209 3210 resp.ind_tbl_handle = uobj->id; 3211 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; 3212 resp.response_length = required_resp_len; 3213 3214 err = ib_copy_to_udata(ucore, 3215 &resp, resp.response_length); 3216 if (err) 3217 goto err_copy; 3218 3219 kfree(wqs_handles); 3220 3221 for (j = 0; j < num_read_wqs; j++) 3222 uobj_put_obj_read(wqs[j]); 3223 3224 uobj_alloc_commit(uobj); 3225 return 0; 3226 3227 err_copy: 3228 ib_destroy_rwq_ind_table(rwq_ind_tbl); 3229 err_uobj: 3230 uobj_alloc_abort(uobj); 3231 put_wqs: 3232 for (j = 0; j < num_read_wqs; j++) 3233 uobj_put_obj_read(wqs[j]); 3234 err_free: 3235 kfree(wqs_handles); 3236 kfree(wqs); 3237 return err; 3238 } 3239 3240 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, 3241 struct ib_device *ib_dev, 3242 struct ib_udata *ucore, 3243 struct ib_udata *uhw) 3244 { 3245 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; 3246 struct ib_uobject *uobj; 3247 int ret; 3248 size_t required_cmd_sz; 3249 3250 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); 3251 3252 if (ucore->inlen < required_cmd_sz) 3253 return -EINVAL; 3254 3255 if (ucore->inlen > sizeof(cmd) && 3256 !ib_is_udata_cleared(ucore, sizeof(cmd), 3257 ucore->inlen - sizeof(cmd))) 3258 return -EOPNOTSUPP; 3259 3260 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3261 if (ret) 3262 return ret; 3263 3264 if (cmd.comp_mask) 3265 return -EOPNOTSUPP; 3266 3267 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle, 3268 file->ucontext); 3269 if (IS_ERR(uobj)) 3270 return PTR_ERR(uobj); 3271 3272 return uobj_remove_commit(uobj); 3273 } 3274 3275 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, 3276 struct ib_device *ib_dev, 3277 struct ib_udata *ucore, 3278 struct ib_udata *uhw) 3279 { 3280 struct ib_uverbs_create_flow cmd; 3281 struct ib_uverbs_create_flow_resp resp; 3282 struct ib_uobject *uobj; 3283 struct ib_flow *flow_id; 3284 struct ib_uverbs_flow_attr *kern_flow_attr; 3285 struct ib_flow_attr *flow_attr; 3286 struct ib_qp *qp; 3287 int err = 0; 3288 void *kern_spec; 3289 void *ib_spec; 3290 int i; 3291 3292 if (ucore->inlen < sizeof(cmd)) 3293 return -EINVAL; 3294 3295 if (ucore->outlen < sizeof(resp)) 3296 return -ENOSPC; 3297 3298 
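	/*
	 * Read the fixed-size command first; num_of_specs variable-size
	 * flow specs follow it in the input buffer.
	 */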
err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3299 if (err) 3300 return err; 3301 3302 ucore->inbuf += sizeof(cmd); 3303 ucore->inlen -= sizeof(cmd); 3304 3305 if (cmd.comp_mask) 3306 return -EINVAL; 3307 3308 if (!capable(CAP_NET_RAW)) 3309 return -EPERM; 3310 3311 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED) 3312 return -EINVAL; 3313 3314 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && 3315 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) || 3316 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT))) 3317 return -EINVAL; 3318 3319 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) 3320 return -EINVAL; 3321 3322 if (cmd.flow_attr.size > ucore->inlen || 3323 cmd.flow_attr.size > 3324 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 3325 return -EINVAL; 3326 3327 if (cmd.flow_attr.reserved[0] || 3328 cmd.flow_attr.reserved[1]) 3329 return -EINVAL; 3330 3331 if (cmd.flow_attr.num_of_specs) { 3332 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 3333 GFP_KERNEL); 3334 if (!kern_flow_attr) 3335 return -ENOMEM; 3336 3337 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); 3338 err = ib_copy_from_udata(kern_flow_attr + 1, ucore, 3339 cmd.flow_attr.size); 3340 if (err) 3341 goto err_free_attr; 3342 } else { 3343 kern_flow_attr = &cmd.flow_attr; 3344 } 3345 3346 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext); 3347 if (IS_ERR(uobj)) { 3348 err = PTR_ERR(uobj); 3349 goto err_free_attr; 3350 } 3351 3352 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext); 3353 if (!qp) { 3354 err = -EINVAL; 3355 goto err_uobj; 3356 } 3357 3358 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs * 3359 sizeof(union ib_flow_spec), GFP_KERNEL); 3360 if (!flow_attr) { 3361 err = -ENOMEM; 3362 goto err_put; 3363 } 3364 3365 flow_attr->type = kern_flow_attr->type; 3366 flow_attr->priority = kern_flow_attr->priority; 3367 flow_attr->num_of_specs = kern_flow_attr->num_of_specs; 3368 flow_attr->port = kern_flow_attr->port; 3369 flow_attr->flags = kern_flow_attr->flags; 3370 flow_attr->size = sizeof(*flow_attr); 3371 3372 kern_spec = kern_flow_attr + 1; 3373 ib_spec = flow_attr + 1; 3374 for (i = 0; i < flow_attr->num_of_specs && 3375 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 3376 cmd.flow_attr.size >= 3377 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { 3378 err = kern_spec_to_ib_spec(kern_spec, ib_spec); 3379 if (err) 3380 goto err_free; 3381 flow_attr->size += 3382 ((union ib_flow_spec *) ib_spec)->size; 3383 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; 3384 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; 3385 ib_spec += ((union ib_flow_spec *) ib_spec)->size; 3386 } 3387 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3388 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 3389 i, cmd.flow_attr.size); 3390 err = -EINVAL; 3391 goto err_free; 3392 } 3393 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 3394 if (IS_ERR(flow_id)) { 3395 err = PTR_ERR(flow_id); 3396 goto err_free; 3397 } 3398 flow_id->uobject = uobj; 3399 uobj->object = flow_id; 3400 3401 memset(&resp, 0, sizeof(resp)); 3402 resp.flow_handle = uobj->id; 3403 3404 err = ib_copy_to_udata(ucore, 3405 &resp, sizeof(resp)); 3406 if (err) 3407 goto err_copy; 3408 3409 uobj_put_obj_read(qp); 3410 uobj_alloc_commit(uobj); 3411 kfree(flow_attr); 3412 if (cmd.flow_attr.num_of_specs) 3413 kfree(kern_flow_attr); 3414 return 0; 3415 
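	/*
	 * Error unwind: destroy the flow if it was created, then free the
	 * translated spec buffer, release the QP and abort the uobject.
	 */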
err_copy: 3416 ib_destroy_flow(flow_id); 3417 err_free: 3418 kfree(flow_attr); 3419 err_put: 3420 uobj_put_obj_read(qp); 3421 err_uobj: 3422 uobj_alloc_abort(uobj); 3423 err_free_attr: 3424 if (cmd.flow_attr.num_of_specs) 3425 kfree(kern_flow_attr); 3426 return err; 3427 } 3428 3429 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3430 struct ib_device *ib_dev, 3431 struct ib_udata *ucore, 3432 struct ib_udata *uhw) 3433 { 3434 struct ib_uverbs_destroy_flow cmd; 3435 struct ib_uobject *uobj; 3436 int ret; 3437 3438 if (ucore->inlen < sizeof(cmd)) 3439 return -EINVAL; 3440 3441 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3442 if (ret) 3443 return ret; 3444 3445 if (cmd.comp_mask) 3446 return -EINVAL; 3447 3448 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle, 3449 file->ucontext); 3450 if (IS_ERR(uobj)) 3451 return PTR_ERR(uobj); 3452 3453 ret = uobj_remove_commit(uobj); 3454 return ret; 3455 } 3456 3457 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3458 struct ib_device *ib_dev, 3459 struct ib_uverbs_create_xsrq *cmd, 3460 struct ib_udata *udata) 3461 { 3462 struct ib_uverbs_create_srq_resp resp; 3463 struct ib_usrq_object *obj; 3464 struct ib_pd *pd; 3465 struct ib_srq *srq; 3466 struct ib_uobject *uninitialized_var(xrcd_uobj); 3467 struct ib_srq_init_attr attr; 3468 int ret; 3469 3470 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq), 3471 file->ucontext); 3472 if (IS_ERR(obj)) 3473 return PTR_ERR(obj); 3474 3475 if (cmd->srq_type == IB_SRQT_XRC) { 3476 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle, 3477 file->ucontext); 3478 if (IS_ERR(xrcd_uobj)) { 3479 ret = -EINVAL; 3480 goto err; 3481 } 3482 3483 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object; 3484 if (!attr.ext.xrc.xrcd) { 3485 ret = -EINVAL; 3486 goto err_put_xrcd; 3487 } 3488 3489 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 3490 atomic_inc(&obj->uxrcd->refcnt); 3491 3492 attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle, 3493 file->ucontext); 3494 if (!attr.ext.xrc.cq) { 3495 ret = -EINVAL; 3496 goto err_put_xrcd; 3497 } 3498 } 3499 3500 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext); 3501 if (!pd) { 3502 ret = -EINVAL; 3503 goto err_put_cq; 3504 } 3505 3506 attr.event_handler = ib_uverbs_srq_event_handler; 3507 attr.srq_context = file; 3508 attr.srq_type = cmd->srq_type; 3509 attr.attr.max_wr = cmd->max_wr; 3510 attr.attr.max_sge = cmd->max_sge; 3511 attr.attr.srq_limit = cmd->srq_limit; 3512 3513 obj->uevent.events_reported = 0; 3514 INIT_LIST_HEAD(&obj->uevent.event_list); 3515 3516 srq = pd->device->create_srq(pd, &attr, udata); 3517 if (IS_ERR(srq)) { 3518 ret = PTR_ERR(srq); 3519 goto err_put; 3520 } 3521 3522 srq->device = pd->device; 3523 srq->pd = pd; 3524 srq->srq_type = cmd->srq_type; 3525 srq->uobject = &obj->uevent.uobject; 3526 srq->event_handler = attr.event_handler; 3527 srq->srq_context = attr.srq_context; 3528 3529 if (cmd->srq_type == IB_SRQT_XRC) { 3530 srq->ext.xrc.cq = attr.ext.xrc.cq; 3531 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; 3532 atomic_inc(&attr.ext.xrc.cq->usecnt); 3533 atomic_inc(&attr.ext.xrc.xrcd->usecnt); 3534 } 3535 3536 atomic_inc(&pd->usecnt); 3537 atomic_set(&srq->usecnt, 0); 3538 3539 obj->uevent.uobject.object = srq; 3540 obj->uevent.uobject.user_handle = cmd->user_handle; 3541 3542 memset(&resp, 0, sizeof resp); 3543 resp.srq_handle = obj->uevent.uobject.id; 3544 resp.max_wr = attr.attr.max_wr; 3545 resp.max_sge = attr.attr.max_sge; 3546 if (cmd->srq_type == 
IB_SRQT_XRC) 3547 resp.srqn = srq->ext.xrc.srq_num; 3548 3549 if (copy_to_user((void __user *) (unsigned long) cmd->response, 3550 &resp, sizeof resp)) { 3551 ret = -EFAULT; 3552 goto err_copy; 3553 } 3554 3555 if (cmd->srq_type == IB_SRQT_XRC) { 3556 uobj_put_read(xrcd_uobj); 3557 uobj_put_obj_read(attr.ext.xrc.cq); 3558 } 3559 uobj_put_obj_read(pd); 3560 uobj_alloc_commit(&obj->uevent.uobject); 3561 3562 return 0; 3563 3564 err_copy: 3565 ib_destroy_srq(srq); 3566 3567 err_put: 3568 uobj_put_obj_read(pd); 3569 3570 err_put_cq: 3571 if (cmd->srq_type == IB_SRQT_XRC) 3572 uobj_put_obj_read(attr.ext.xrc.cq); 3573 3574 err_put_xrcd: 3575 if (cmd->srq_type == IB_SRQT_XRC) { 3576 atomic_dec(&obj->uxrcd->refcnt); 3577 uobj_put_read(xrcd_uobj); 3578 } 3579 3580 err: 3581 uobj_alloc_abort(&obj->uevent.uobject); 3582 return ret; 3583 } 3584 3585 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 3586 struct ib_device *ib_dev, 3587 const char __user *buf, int in_len, 3588 int out_len) 3589 { 3590 struct ib_uverbs_create_srq cmd; 3591 struct ib_uverbs_create_xsrq xcmd; 3592 struct ib_uverbs_create_srq_resp resp; 3593 struct ib_udata udata; 3594 int ret; 3595 3596 if (out_len < sizeof resp) 3597 return -ENOSPC; 3598 3599 if (copy_from_user(&cmd, buf, sizeof cmd)) 3600 return -EFAULT; 3601 3602 xcmd.response = cmd.response; 3603 xcmd.user_handle = cmd.user_handle; 3604 xcmd.srq_type = IB_SRQT_BASIC; 3605 xcmd.pd_handle = cmd.pd_handle; 3606 xcmd.max_wr = cmd.max_wr; 3607 xcmd.max_sge = cmd.max_sge; 3608 xcmd.srq_limit = cmd.srq_limit; 3609 3610 INIT_UDATA(&udata, buf + sizeof cmd, 3611 (unsigned long) cmd.response + sizeof resp, 3612 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3613 out_len - sizeof resp); 3614 3615 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3616 if (ret) 3617 return ret; 3618 3619 return in_len; 3620 } 3621 3622 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 3623 struct ib_device *ib_dev, 3624 const char __user *buf, int in_len, int out_len) 3625 { 3626 struct ib_uverbs_create_xsrq cmd; 3627 struct ib_uverbs_create_srq_resp resp; 3628 struct ib_udata udata; 3629 int ret; 3630 3631 if (out_len < sizeof resp) 3632 return -ENOSPC; 3633 3634 if (copy_from_user(&cmd, buf, sizeof cmd)) 3635 return -EFAULT; 3636 3637 INIT_UDATA(&udata, buf + sizeof cmd, 3638 (unsigned long) cmd.response + sizeof resp, 3639 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3640 out_len - sizeof resp); 3641 3642 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3643 if (ret) 3644 return ret; 3645 3646 return in_len; 3647 } 3648 3649 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 3650 struct ib_device *ib_dev, 3651 const char __user *buf, int in_len, 3652 int out_len) 3653 { 3654 struct ib_uverbs_modify_srq cmd; 3655 struct ib_udata udata; 3656 struct ib_srq *srq; 3657 struct ib_srq_attr attr; 3658 int ret; 3659 3660 if (copy_from_user(&cmd, buf, sizeof cmd)) 3661 return -EFAULT; 3662 3663 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 3664 out_len); 3665 3666 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext); 3667 if (!srq) 3668 return -EINVAL; 3669 3670 attr.max_wr = cmd.max_wr; 3671 attr.srq_limit = cmd.srq_limit; 3672 3673 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 3674 3675 uobj_put_obj_read(srq); 3676 3677 return ret ? 
		ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}

int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif 3809 resp.response_length += sizeof(resp.odp_caps); 3810 3811 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) 3812 goto end; 3813 3814 resp.timestamp_mask = attr.timestamp_mask; 3815 resp.response_length += sizeof(resp.timestamp_mask); 3816 3817 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) 3818 goto end; 3819 3820 resp.hca_core_clock = attr.hca_core_clock; 3821 resp.response_length += sizeof(resp.hca_core_clock); 3822 3823 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex)) 3824 goto end; 3825 3826 resp.device_cap_flags_ex = attr.device_cap_flags; 3827 resp.response_length += sizeof(resp.device_cap_flags_ex); 3828 3829 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps)) 3830 goto end; 3831 3832 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; 3833 resp.rss_caps.max_rwq_indirection_tables = 3834 attr.rss_caps.max_rwq_indirection_tables; 3835 resp.rss_caps.max_rwq_indirection_table_size = 3836 attr.rss_caps.max_rwq_indirection_table_size; 3837 3838 resp.response_length += sizeof(resp.rss_caps); 3839 3840 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq)) 3841 goto end; 3842 3843 resp.max_wq_type_rq = attr.max_wq_type_rq; 3844 resp.response_length += sizeof(resp.max_wq_type_rq); 3845 3846 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps)) 3847 goto end; 3848 3849 resp.raw_packet_caps = attr.raw_packet_caps; 3850 resp.response_length += sizeof(resp.raw_packet_caps); 3851 end: 3852 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 3853 return err; 3854 } 3855