/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

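/*
 * GET_CONTEXT: allocate the per-process device context. Charges the
 * rdma cgroup, creates the driver ucontext and the async event file
 * descriptor, and reports num_comp_vectors back to userspace. Only
 * one context may be created per uverbs file, hence the check under
 * file->mutex.
 */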
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

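/*
 * QUERY_DEVICE: copy the cached device attributes (ib_dev->attrs)
 * into the legacy fixed-size response.
 */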
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

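/*
 * XRC domains opened through the same inode must resolve to the same
 * ib_xrcd. The per-device xrcd_tree maps an inode to its xrcd so a
 * later open can attach to the existing object; each entry holds an
 * inode reference (igrab() on insert, iput() on delete).
 */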
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

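/*
 * OPEN_XRCD: open an XRC domain. With cmd.fd == -1 an anonymous xrcd
 * is created; otherwise the inode behind cmd.fd either yields the
 * xrcd already registered for it or, given O_CREAT, a new one.
 * O_EXCL fails the open when an xrcd already exists for the inode.
 */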
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj)) {
		mutex_unlock(&file->device->xrcd_tree_mutex);
		return PTR_ERR(uobj);
	}

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

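/*
 * REG_MR: register a user memory region. start and hca_va must have
 * the same page offset, and IB_ACCESS_ON_DEMAND is only allowed when
 * the device advertises IB_DEVICE_ON_DEMAND_PAGING.
 */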
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

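/*
 * ALLOC_MW: allocate a memory window on the given PD and return its
 * rkey and handle; the failure path releases it again through
 * uverbs_dealloc_mw().
 */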
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

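/*
 * Common CQ creation path shared by CREATE_CQ and EX_CREATE_CQ:
 * validates the completion vector, resolves the optional completion
 * channel, creates the CQ, and returns the response through the
 * caller-supplied callback.
 */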
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

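/*
 * CREATE_CQ: legacy entry point. Repacks the fixed-size command into
 * the extended form and reuses create_cq() with a callback that
 * copies only the base response.
 */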
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

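/*
 * RESIZE_CQ: hand the requested depth to the driver and return the
 * resulting cq->cqe; note that only resp.cqe is copied back to
 * userspace.
 */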
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

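/*
 * DESTROY_CQ: an extra uobject reference is held across
 * uobj_remove_commit() so the event counts can still be read for the
 * response after the CQ itself is gone.
 */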
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

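/*
 * Common QP creation path for CREATE_QP and EX_CREATE_QP. Resolves
 * the PD, CQs, SRQ, XRCD or RWQ indirection table named by the
 * command, validates create_flags, and creates the QP (via
 * ib_create_qp() for XRC_TGT, directly through the driver otherwise).
 */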
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

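/*
 * CREATE_QP: legacy entry point. Repacks the fixed-size command into
 * struct ib_uverbs_ex_create_qp and defers to create_qp().
 */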
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

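/*
 * OPEN_QP: obtain a handle to an existing XRC TGT QP, identified by
 * QP number within an XRC domain, instead of creating a new QP.
 */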
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

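/*
 * Translate a kernel rdma_ah_attr into the uverbs wire format; the
 * GRH fields are filled in only when the address handle carries a
 * global route.
 */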
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

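/*
 * Fill a kernel rdma_ah_attr from the uverbs destination; a GRH is
 * set up only when userspace marked the address as global.
 */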
static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

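/*
 * MODIFY_QP: legacy entry point. Rejects attribute mask bits beyond
 * the legacy set, then shares modify_qp() with the extended command.
 */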
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

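/*
 * DESTROY_QP: as with DESTROY_CQ, an extra uobject reference is held
 * across uobj_remove_commit() so events_reported can be returned.
 */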
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
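/*
 * Note: alloc_wr() returns one allocation holding the work request
 * immediately followed by its scatter/gather array, roughly:
 *
 *	base                         base + ALIGN(wr_size, sizeof(ib_sge))
 *	|                            |
 *	v                            v
 *	[ struct ib_*_wr (+padding) ][ num_sge * struct ib_sge ]
 *
 * The overflow guard only rejects num_sge values that would wrap the
 * size computation; real per-QP SGE limits are enforced by the driver
 * when the request is posted.
 */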
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
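/*
 * Note: when the driver rejects a post, resp.bad_wr carries the
 * 1-based position of the first failed work request (the loop above
 * counts chain entries up to the driver's bad_wr pointer); requests
 * ahead of it were already accepted.
 */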
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
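/*
 * Note: ib_uverbs_unmarshall_recv() is shared by ib_uverbs_post_recv()
 * above and ib_uverbs_post_srq_recv() below; receive work requests
 * carry no opcode-specific payload, so one wire-to-kernel conversion
 * serves both QPs and SRQs.
 */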
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
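/*
 * Note: ib_uverbs_create_ah() below converts the user address vector
 * into a kernel rdma_ah_attr.  The destination MAC, when the AH type
 * has one, is zeroed on purpose: it is expected to be resolved from
 * the GRH by the driver rather than trusted from userspace.
 */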
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;
	u8 *dmac;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}
	dmac = rdma_ah_retrieve_dmac(&attr);
	if (dmac)
		memset(dmac, 0, ETH_ALEN);

	ah = pd->device->create_ah(pd, &attr, &udata);

	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->device = pd->device;
	ah->pd = pd;
	atomic_inc(&pd->usecnt);
	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
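/*
 * Note: the helpers that follow translate the variable-length flow
 * specifications supplied by userspace into the kernel's
 * union ib_flow_spec.  On the wire each spec is a (type, size) header
 * followed by a value block and a mask block of equal length; the
 * mask selects which bits of the value must match.
 */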
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}
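/*
 * Note: spec_filter_size() applies the same forward-compatibility
 * rule as the extended commands, per filter: a larger, newer user
 * filter is accepted only if the bytes the kernel does not understand
 * are zero.  Rough example for a 16-byte kernel filter:
 *
 *	spec_filter_size(buf, 16, 16) == 16
 *	spec_filter_size(buf, 24, 16) == 16       (buf[16..23] all zero)
 *	spec_filter_size(buf, 24, 16) == -EINVAL  (otherwise)
 */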
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
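/*
 * Note: kern_spec_to_ib_spec() dispatches on the spec type: values
 * from IB_FLOW_SPEC_ACTION_TAG upward are actions (tag, drop), lower
 * values are packet-matching filters, optionally with
 * IB_FLOW_SPEC_INNER set to match the inner headers of an
 * encapsulated packet.
 */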
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
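/*
 * Note: create_flags is a later addition to the create-WQ command, so
 * it is only consumed when the command written by userspace is long
 * enough to contain it; older userspace whose command ends at max_sge
 * keeps working unchanged.
 */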
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}
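/*
 * Note: an RSS indirection table below always holds a power-of-two
 * number of WQ handles; userspace passes log2 of the entry count
 * (e.g. log_ind_tbl_size == 3 selects 8 handles).  The handle array
 * itself is u64-aligned on the wire, which is why a one-entry table
 * still carries an extra padding __u32.
 */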
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
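/*
 * Note: the spec walk in ib_uverbs_ex_create_flow() keeps two running
 * sizes: cmd.flow_attr.size counts down as user specs are consumed
 * and flow_attr->size counts up as kernel specs are built.  A bogus
 * size field in any spec therefore leaves a nonzero remainder (or a
 * short spec count) and the flow is rejected instead of the walk
 * running past the buffer.
 */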
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
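/*
 * Note: __uverbs_create_xsrq() below is the single construction path
 * for every SRQ flavor: basic SRQs, XRC SRQs (which take an XRC
 * domain and report back an SRQ number) and tag-matching SRQs (which
 * take max_num_tags).  The entry points that follow differ only in
 * how they unpack the user command.
 */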
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
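/*
 * Note: ib_uverbs_modify_srq() below conveys only max_wr and
 * srq_limit; cmd.attr_mask selects which of the two the driver should
 * apply.  max_sge is fixed when the SRQ is created.
 */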
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
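/*
 * Note: ib_uverbs_ex_query_device() below is the template for
 * extensible responses.  resp.response_length starts at the base size
 * and grows by sizeof(field) for each optional field the caller's
 * output buffer can hold, so older userspace receives a shorter but
 * still valid response.  The recurring step looks roughly like this
 * ("foo" is a placeholder, not a real field):
 *
 *	if (ucore->outlen < resp.response_length + sizeof(resp.foo))
 *		goto end;
 *	resp.foo = attr.foo;
 *	resp.response_length += sizeof(resp.foo);
 */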
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
	resp.tm_caps.flags = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}