/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"
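
/*
 * Resolve a completion-channel fd into its event file.  An extra
 * reference is taken on the uobject before the read lock is dropped,
 * so the returned file remains valid after uobj_put_read().
 */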
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
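
/*
 * XRC domains are shared between processes by opening the same file.
 * Each uverbs device keeps an rb-tree keyed by inode, mapping an inode
 * to the XRCD created for it, so later opens of that file find the
 * existing domain instead of allocating a new one.
 */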
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
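
/*
 * cmd.fd == -1 requests a private, unshared XRCD.  Otherwise the fd's
 * inode keys the lookup in the xrcd table, and cmd.oflags follows
 * open(2) semantics: O_CREAT allocates the domain if it is missing and
 * O_EXCL fails if it already exists.
 */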
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* look up the inode behind the user-supplied fd */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* inode has no XRC domain yet; O_CREAT is required */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
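
/*
 * cmd.start (the user virtual address) and cmd.hca_va may differ, but
 * they must share the same offset within a page for the HCA mapping to
 * be expressible.
 */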
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
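
/*
 * Common CQ creation path shared by the legacy and extended commands.
 * The caller-supplied callback copies the response back to user space,
 * so each ABI revision returns only the fields it defines.
 */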
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
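
/*
 * Note that on success only resp.cqe is copied back (sizeof resp.cqe
 * bytes); the legacy response layout keeps cqe as its first field, so
 * this preserves the old ABI.
 */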
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
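
/*
 * Common QP creation path shared by the legacy and extended commands.
 * For IB_QPT_XRC_TGT the pd_handle carries an XRCD handle and no PD or
 * CQs are referenced; when a receive-WQ indirection table is supplied
 * the QP has no receive queue of its own.
 */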
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
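
/*
 * Translate a kernel rdma_ah_attr into the uverbs wire format.  The
 * GRH fields are only filled in when the address handle is global.
 */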
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}
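
/*
 * The legacy command cannot carry attribute bits above
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK, so reject requests that set them
 * instead of silently ignoring the bits.
 */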
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}
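
	/*
	 * On failure, tell user space how far the driver got: bad_wr ends
	 * up counting the posted WRs up to and including the one that
	 * failed.
	 */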
	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
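
/*
 * Unmarshal an array of receive WRs and their scatter lists from user
 * space into a kmalloc'ed, NULL-terminated ib_recv_wr chain.  On failure
 * everything already built is freed and an ERR_PTR() is returned.
 */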
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
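
/* Same as ib_uverbs_post_recv(), but the WRs are posted to an SRQ. */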
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
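
/*
 * Multicast attachments are tracked on the QP's uobject (mcast_list) so
 * they can be released when the QP is destroyed; attaching an already
 * attached <gid, mlid> pair is treated as success.
 */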
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
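
/*
 * Conversion of user space flow specs into kernel ib_flow_spec unions.
 * Action specs are copied field by field; filter specs additionally
 * require that any bytes supplied beyond the kernel's known filter size
 * be zero, so requests that depend on newer filter fields fail cleanly.
 */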
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Returns the user space filter size, including padding */
static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise
	 * this check may pass but additional new attributes will go
	 * unhandled.
	 */
	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
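
/*
 * Dispatch on spec type: values at or above IB_FLOW_SPEC_ACTION_TAG are
 * actions, everything below is a header filter.
 */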
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
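
/*
 * The extended (ex_) commands below use variable-length structures: the
 * caller must supply at least the fields this kernel knows about, and
 * any trailing bytes beyond sizeof(cmd) must be zeroed so that requests
 * relying on newer fields are refused instead of silently truncated.
 */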
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	if (!pd->device->create_wq) {
		err = -EOPNOTSUPP;
		goto err_put_cq;
	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	if (!wq->device->modify_wq) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
	uobj_put_obj_read(wq);
	return ret;
}
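
/*
 * An RWQ indirection table maps 2^log_ind_tbl_size slots onto the WQs
 * whose handles follow the command header; RSS-capable QPs use it to
 * spread incoming traffic across receive work queues.
 */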
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	if (!ib_dev->create_rwq_ind_table) {
		err = -EOPNOTSUPP;
		goto err_uobj;
	}
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
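
/*
 * Flow steering rules require CAP_NET_RAW.  The user-supplied spec list
 * is walked entry by entry; the advertised sizes must consume the
 * command buffer exactly or the request is rejected.
 */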
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
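
/*
 * Shared SRQ creation path: basic SRQs, XRC SRQs (which take a reference
 * on their XRCD) and tag-matching SRQs all funnel through here.
 */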
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
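
/*
 * The legacy create_srq command is repackaged as an IB_SRQT_BASIC
 * extended request and handed to the common path above.
 */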
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
			     out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
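
/*
 * The extended query_device response grows incrementally: each optional
 * capability block is appended, and response_length bumped, only when
 * the caller's output buffer is large enough to hold it.
 */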
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
	resp.tm_caps.flags = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}
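
/* Only CQ moderation (count/period) can currently be modified here. */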
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
			  sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* sanity checks */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}