/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
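
/*
 * GET_CONTEXT is the first command a process issues against a device:
 * it charges the rdma cgroup, asks the driver for a struct ib_ucontext,
 * and hands back an event-file fd for asynchronous events.  Only one
 * ucontext may exist per ib_uverbs_file, hence the -EINVAL check under
 * file->mutex.
 */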
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
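
/*
 * QUERY_DEVICE simply snapshots the attributes the core cached at
 * registration time (ib_dev->attrs); no driver round trip is needed.
 */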
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
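
/*
 * Destroy commands all follow the same rdma_core pattern: take the
 * uobject with a write (exclusive) lock, then let uobj_remove_commit()
 * tear down the driver object and the handle together.  A destroy that
 * fails (e.g. -EBUSY for an object still in use) leaves the handle
 * alive.
 */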
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
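
/*
 * An XRC domain is shared between processes by handing each of them the
 * same file: the inode behind cmd.fd is the lookup key in the
 * per-device rb-tree above, so every open of that file resolves to one
 * struct ib_xrcd.  xrcd->usecnt counts the userspace opens; the table
 * entry (and the inode reference taken by igrab()) goes away with the
 * last one.
 */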
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
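
/*
 * REG_MR registers a region of user memory.  The checks done here are
 * generic: the page offset of cmd.start must match that of the
 * requested HCA virtual address, the access flags must be a valid
 * combination, and IB_ACCESS_ON_DEMAND is refused when the device
 * lacks ODP support.  The actual mapping is left to the driver's
 * reg_user_mr method.
 */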
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
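
/*
 * create_cq() backs both the legacy CREATE_CQ command and the extended
 * EX_CREATE_CQ path.  Callers pass the size of the command they
 * actually received (cmd_sz), so optional trailing fields such as
 * cmd->flags are only read when userspace really provided them, plus a
 * callback that writes back whichever response layout that caller's
 * ABI expects.
 */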
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
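
/*
 * Legacy entry point: repackage struct ib_uverbs_create_cq as the
 * extended command, sized so that create_cq() only trusts the fields
 * up to and including comp_channel.
 */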
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
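
/*
 * Note that only resp.cqe is copied back to userspace here: the driver
 * may have rounded the requested CQ depth up, so the caller needs the
 * actual value.
 */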
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_queue *ev_queue;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	ev_queue = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
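
/*
 * create_qp() is shared by CREATE_QP and EX_CREATE_QP.  Depending on
 * the QP type it resolves a different set of handles: an XRC target QP
 * hangs off an XRCD only (the pd_handle field carries the XRCD handle),
 * an XRC initiator has no receive side, and a QP built on an RWQ
 * indirection table may have no send queue and must not name receive
 * resources at all.  cmd_sz bounds which optional trailing fields
 * (rwq_ind_tbl_handle, create_flags) are trusted.
 */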
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
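
/*
 * The extended variant accepts a comp_mask; anything outside
 * IB_UVERBS_CREATE_QP_SUP_COMP_MASK is rejected, presumably so that a
 * kernel which does not understand a new capability fails loudly
 * instead of silently ignoring it.
 */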
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
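
/*
 * QUERY_QP round-trips through ib_query_qp() and then flattens both the
 * primary and alternate path address vectors into the fixed ABI layout,
 * copying the GRH fields only when the AH actually carries one.
 */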
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	const struct ib_global_route *grh;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
	resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
	resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
	resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
	resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
				 IB_AH_GRH);
	if (resp.dest.is_global) {
		grh = rdma_ah_read_grh(&attr->ah_attr);
		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
		resp.dest.flow_label = grh->flow_label;
		resp.dest.sgid_index = grh->sgid_index;
		resp.dest.hop_limit = grh->hop_limit;
		resp.dest.traffic_class = grh->traffic_class;
	}
	resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);

	resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
	resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
	resp.alt_dest.static_rate
			= rdma_ah_get_static_rate(&attr->alt_ah_attr);
	resp.alt_dest.is_global
			= !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
			     IB_AH_GRH);
	if (resp.alt_dest.is_global) {
		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
		resp.alt_dest.flow_label = grh->flow_label;
		resp.alt_dest.sgid_index = grh->sgid_index;
		resp.alt_dest.hop_limit = grh->hop_limit;
		resp.alt_dest.traffic_class = grh->traffic_class;
	}
	resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
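
/*
 * Common worker for MODIFY_QP and EX_MODIFY_QP: unpack the command into
 * a struct ib_qp_attr, rebuild the primary and alternate address
 * vectors, and dispatch either directly to the driver (for real QPs) or
 * through ib_modify_qp() for the shared-QP case.
 */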
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	attr->ah_attr.type = rdma_ah_find_type(qp->device,
					       cmd->base.dest.port_num);
	if (cmd->base.dest.is_global) {
		rdma_ah_set_grh(&attr->ah_attr, NULL,
				cmd->base.dest.flow_label,
				cmd->base.dest.sgid_index,
				cmd->base.dest.hop_limit,
				cmd->base.dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
	}
	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
	rdma_ah_set_port_num(&attr->ah_attr,
			     cmd->base.dest.port_num);

	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
						   cmd->base.alt_dest.port_num);
	if (cmd->base.alt_dest.is_global) {
		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
				cmd->base.alt_dest.flow_label,
				cmd->base.alt_dest.sgid_index,
				cmd->base.alt_dest.hop_limit,
				cmd->base.alt_dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
				     cmd->base.alt_dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
	}

	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
	rdma_ah_set_path_bits(&attr->alt_ah_attr,
			      cmd->base.alt_dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->alt_ah_attr,
				cmd->base.alt_dest.static_rate);
	rdma_ah_set_port_num(&attr->alt_ah_attr,
			     cmd->base.alt_dest.port_num);

	if (qp->real_qp == qp) {
		if (cmd->base.attr_mask & IB_QP_AV) {
			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
			if (ret)
				goto release_qp;
		}
		ret = qp->device->modify_qp(qp, attr,
					    modify_qp_mask(qp->qp_type,
							   cmd->base.attr_mask),
					    udata);
	} else {
		ret = ib_modify_qp(qp, attr,
				   modify_qp_mask(qp->qp_type,
						  cmd->base.attr_mask));
	}

release_qp:
	uobj_put_obj_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base), out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
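
/*
 * As with DESTROY_CQ, the uobject is pinned across uobj_remove_commit()
 * because the response carries the number of async events reported for
 * this QP.
 */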
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
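/*
 * alloc_wr() packs a send work request and its scatter/gather list into a
 * single allocation: the WR struct, rounded up to ib_sge alignment,
 * followed by num_sge struct ib_sge entries.  The guard in it rejects
 * num_sge values that would overflow the size calculation; callers later
 * point sg_list just past the aligned WR header.
 */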
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
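/*
 * ib_uverbs_unmarshall_recv() builds a kernel ib_recv_wr chain from a
 * user buffer laid out as wr_count fixed-size WQEs followed by the
 * scatter/gather entries.  It is shared by the QP and SRQ receive paths
 * below; on error it frees any partially built chain and returns an
 * ERR_PTR, on success the caller owns the chain and must kfree() each WR.
 */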
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
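/*
 * Posting receives to an SRQ uses the same unmarshalling scheme as
 * ib_uverbs_post_recv(); only the target object type differs.
 */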
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;
	u8 *dmac;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}
	dmac = rdma_ah_retrieve_dmac(&attr);
	if (dmac)
		memset(dmac, 0, ETH_ALEN);

	ah = pd->device->create_ah(pd, &attr, &udata);

	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->device = pd->device;
	ah->pd = pd;
	atomic_inc(&pd->usecnt);
	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
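/*
 * Flow steering specs arrive as a packed array of ib_uverbs_flow_spec
 * entries following the create_flow command.  The helpers below convert
 * them into the kernel's union ib_flow_spec, validating user-supplied
 * sizes on the way: action specs must match their uverbs struct size
 * exactly, while filter specs may differ from the kernel filter size as
 * long as any bytes beyond the kernel's real filter are zero.
 */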
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_wq *wq;
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) +
			  sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) +
			    sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	wq = uobj->object;
	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) +
			  sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}

int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) +
				 sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) +
			    sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;
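	/*
	 * The indirection table carries 1 << log_ind_tbl_size WQ handles.
	 * Because the handle array in the command is u64 aligned, a
	 * single-entry table must still supply one extra __u32 of zeroed
	 * padding, which expected_in_size accounted for above.
	 */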
	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) +
			  sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;
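	/*
	 * The command is consumed in two steps: the fixed-size header is
	 * copied first, then the variable-length flow spec array that
	 * follows it is pulled from ucore once the header has been
	 * validated.
	 */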
	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) +
					 cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -=
			((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		ib_spec += ((union ib_flow_spec *)ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						    file->ucontext);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		uobj_put_read(xrcd_uobj);
		uobj_put_obj_read(attr.ext.xrc.cq);
	}
	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_obj_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
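/*
 * The extended query_device response grows field by field: each attribute
 * below is filled in only if the user's output buffer is large enough to
 * hold it, and resp.response_length is advanced accordingly so userspace
 * can tell which trailing fields the kernel actually provided.
 */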
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}