/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
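/*
 * ib_uverbs_get_context() creates the per-process device context: it
 * charges the rdma cgroup for an HCA handle, has the driver allocate
 * the ucontext, and returns an fd for asynchronous events. Only one
 * ucontext may exist per uverbs file, hence the -EINVAL once
 * file->ucontext is set.
 */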
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
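/*
 * Note that ib_uverbs_query_device() answers from the attributes
 * cached in ib_dev->attrs at registration time; no driver round trip
 * happens on this path, unlike ib_uverbs_query_port() below, which
 * calls ib_query_port() for fresh port state.
 */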
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
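/*
 * XRC domains can be shared between processes: every XRCD opened
 * through a file descriptor is indexed by that file's inode in a
 * per-device red-black tree, so opening the same file again returns
 * the same underlying ib_xrcd.
 */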
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
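/*
 * ib_uverbs_open_xrcd() either looks up an existing XRCD through the
 * inode behind cmd.fd or, given O_CREAT, allocates a new one and adds
 * it to the inode table; cmd.fd == -1 yields an XRCD that is never
 * shared.
 */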
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj)) {
		mutex_unlock(&file->device->xrcd_tree_mutex);
		return PTR_ERR(uobj);
	}

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
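/*
 * ib_uverbs_reg_mr() registers a userspace memory region. The start
 * address and the HCA virtual address must share the same offset
 * within a page, and IB_ACCESS_ON_DEMAND is accepted only when the
 * device advertises IB_DEVICE_ON_DEMAND_PAGING.
 */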
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
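/*
 * Tear-down commands such as ib_uverbs_dereg_mr() below follow one
 * pattern: look up the uobject with write access, then let
 * uobj_remove_commit() drive the type-specific destruction and
 * release the handle.
 */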
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
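/*
 * create_cq() is the common worker behind the legacy and extended
 * create-CQ commands. cmd_sz says how much of struct
 * ib_uverbs_ex_create_cq the caller actually supplied, so optional
 * fields such as cmd->flags are read only when present, and the cb
 * callback writes back whichever response layout the caller expects.
 */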
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = &ev_file->ev_queue;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
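/*
 * Legacy create-CQ entry point: the command is repackaged as a struct
 * ib_uverbs_ex_create_cq whose effective size stops right after
 * comp_channel, so create_cq() never reads the extended-only fields.
 */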
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}
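/*
 * Completions are returned to userspace as a struct
 * ib_uverbs_poll_cq_resp header followed by resp.count packed struct
 * ib_uverbs_wc entries; copy_wc_to_user() converts a single kernel
 * ib_wc into that wire format.
 */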
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
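/*
 * ib_uverbs_destroy_cq() must report how many completion and async
 * events were delivered on the CQ, so it holds an extra uobject
 * reference to keep the counters readable after uobj_remove_commit().
 */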
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_queue *ev_queue;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	ev_queue = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
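/*
 * create_qp() backs both QP-creation commands. Depending on qp_type
 * and comp_mask it resolves some combination of PD, XRCD, send/recv
 * CQs, SRQ and RWQ indirection table under the uobject read locks,
 * and drops every reference again on both the success and the error
 * paths.
 */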
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}
	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
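/*
 * Extended create-QP entry point: unlike the legacy command it
 * validates comp_mask against IB_UVERBS_CREATE_QP_SUP_COMP_MASK and
 * requires the caller to leave room for at least response_length in
 * the response buffer.
 */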
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
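/*
 * ib_uverbs_query_qp() flattens the queried ib_qp_attr, including the
 * primary and alternate path descriptions, into the fixed response
 * layout; GRH fields are filled in only when the address handle
 * carries a global route.
 */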
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	const struct ib_global_route *grh;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
	resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
	resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
	resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
	resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
				 IB_AH_GRH);
	if (resp.dest.is_global) {
		grh = rdma_ah_read_grh(&attr->ah_attr);
		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
		resp.dest.flow_label = grh->flow_label;
		resp.dest.sgid_index = grh->sgid_index;
		resp.dest.hop_limit = grh->hop_limit;
		resp.dest.traffic_class = grh->traffic_class;
	}
	resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);

	resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
	resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
	resp.alt_dest.static_rate = rdma_ah_get_static_rate(&attr->alt_ah_attr);
	resp.alt_dest.is_global = !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
				     IB_AH_GRH);
	if (resp.alt_dest.is_global) {
		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
		resp.alt_dest.flow_label = grh->flow_label;
		resp.alt_dest.sgid_index = grh->sgid_index;
		resp.alt_dest.hop_limit = grh->hop_limit;
		resp.alt_dest.traffic_class = grh->traffic_class;
	}
	resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	attr->ah_attr.type = rdma_ah_find_type(qp->device,
					       cmd->base.dest.port_num);
	if (cmd->base.dest.is_global) {
		rdma_ah_set_grh(&attr->ah_attr, NULL,
				cmd->base.dest.flow_label,
				cmd->base.dest.sgid_index,
				cmd->base.dest.hop_limit,
				cmd->base.dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
	}
	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
	rdma_ah_set_port_num(&attr->ah_attr,
			     cmd->base.dest.port_num);

	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
						   cmd->base.dest.port_num);
	if (cmd->base.alt_dest.is_global) {
		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
				cmd->base.alt_dest.flow_label,
				cmd->base.alt_dest.sgid_index,
				cmd->base.alt_dest.hop_limit,
				cmd->base.alt_dest.traffic_class);
		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
				     cmd->base.alt_dest.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
	}

	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
	rdma_ah_set_path_bits(&attr->alt_ah_attr,
			      cmd->base.alt_dest.src_path_bits);
	rdma_ah_set_static_rate(&attr->alt_ah_attr,
				cmd->base.alt_dest.static_rate);
	rdma_ah_set_port_num(&attr->alt_ah_attr,
			     cmd->base.alt_dest.port_num);

	if (qp->real_qp == qp) {
		if (cmd->base.attr_mask & IB_QP_AV) {
			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
			if (ret)
				goto release_qp;
		}
		ret = ib_security_modify_qp(qp,
					    attr,
					    modify_qp_mask(qp->qp_type,
							   cmd->base.attr_mask),
					    udata);
	} else {
		ret = ib_security_modify_qp(qp,
					    attr,
					    modify_qp_mask(qp->qp_type,
							   cmd->base.attr_mask),
					    NULL);
	}

release_qp:
	uobj_put_obj_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base), out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (ib_is_udata_cleared(ucore, sizeof(cmd),
					ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base), out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	/*
	 * Reject unknown trailing input unless it is all zeros, matching
	 * the other extended commands in this file.
	 */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
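/*
 * Work requests posted through the write() path arrive as a command
 * header followed by wr_count fixed-size WQEs and then a flat array of
 * sge_count scatter/gather entries:
 *
 *   buf: | cmd | wr_count * wqe_size WQEs | sge_count * ib_uverbs_sge |
 *
 * alloc_wr() sizes one kernel WR so that its SGE array can live directly
 * behind the ib_sge-aligned WR structure; the U32_MAX check guards the
 * size computation against overflow from a hostile num_sge.
 */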
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge *
						sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
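/*
 * Receive work requests carry no opcode-specific payload, so the QP and
 * SRQ post paths share this unmarshalling helper.  On success it returns
 * a kmalloc'ed linked list of ib_recv_wr (each with its SGE array placed
 * behind it, as in alloc_wr()); the caller frees the list.  On failure
 * everything allocated so far is freed and an ERR_PTR is returned.
 */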
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge *
						sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
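/*
 * Identical to ib_uverbs_post_recv() except that the work requests are
 * posted to a shared receive queue rather than to a QP's receive queue.
 */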
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;
	u8 *dmac;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}
	dmac = rdma_ah_retrieve_dmac(&attr);
	if (dmac)
		memset(dmac, 0, ETH_ALEN);

	ah = pd->device->create_ah(pd, &attr, &udata);

	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->device = pd->device;
	ah->pd = pd;
	atomic_inc(&pd->usecnt);
	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
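/*
 * Multicast attachments are tracked per QP on obj->mcast_list under
 * mcast_lock, so attaching an already attached <gid, mlid> pair is a
 * no-op and anything still attached can be cleaned up when the QP is
 * destroyed.
 */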
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
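/*
 * A uverbs flow spec starts with a header carrying {type, size, reserved};
 * action specs (flow_tag, drop) have a fixed layout, while filter specs
 * are followed by two equally sized blobs, the match value and the match
 * mask.  The helpers below convert the user representation into the
 * kernel's union ib_flow_spec.
 */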
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}
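/*
 * Copy a user filter spec into the kernel representation.  A user filter
 * may be larger than the kernel's current definition as long as the extra
 * trailing bytes are all zero (spec_filter_size() enforces this), which
 * lets newer userspace run on an older kernel without silently dropping
 * match criteria.
 */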
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
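/*
 * The extended (ex_) commands are extensible: userspace states how many
 * bytes it sent (ucore->inlen) and how many it can receive (ucore->outlen).
 * A handler requires only the fields it actually needs (required_cmd_sz),
 * accepts longer input when the unknown tail is zeroed, and reports in
 * resp.response_length how much of the response it filled in.
 */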
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_wq *wq;
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) +
			  sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) +
			    sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	wq = uobj->object;
	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) +
			  sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}
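/*
 * An RSS indirection table maps 2^log_ind_tbl_size slots onto receive
 * work queues.  Userspace passes the WQ handles as a __u32 array that is
 * padded to u64 alignment when only one handle is present; every handle
 * is looked up and held until the table has been created.
 */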
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) +
				 sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) +
			    sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) +
			  sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
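/*
 * Creating a flow steering rule requires CAP_NET_RAW.  The variable-length
 * part of the command carries num_of_specs flow specs back to back; the
 * parsing loop below walks them by their self-declared sizes and demands
 * that they consume cmd.flow_attr.size exactly, so a malformed or
 * truncated spec list is rejected rather than partially applied.
 */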
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) +
					 cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
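/*
 * Shared implementation behind the create_srq and create_xsrq commands.
 * For an XRC SRQ the XRCD and completion queue are looked up and their
 * reference counts taken in addition to the protection domain; the error
 * unwind below releases them in the opposite order.
 */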
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						    file->ucontext);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		uobj_put_read(xrcd_uobj);
		uobj_put_obj_read(attr.ext.xrc.cq);
	}
	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_obj_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
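/*
 * The legacy create_srq command is routed through the xsrq path by
 * repacking it as an IB_SRQT_BASIC ib_uverbs_create_xsrq command.
 */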
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
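/*
 * The extended query-device response is built up capability block by
 * capability block: each block is appended only when the caller's output
 * buffer is large enough for it, and resp.response_length grows to match,
 * so older userspace simply receives the prefix it asked for.
 */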
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}