/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

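/*
 * Allocate and install the per-process ucontext for this uverbs file.
 * Charges the rdma cgroup for the HCA handle, sets up on-demand paging
 * bookkeeping when CONFIG_INFINIBAND_ON_DEMAND_PAGING is enabled, and
 * returns a new fd for asynchronous events in the response.
 */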
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

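/*
 * Answer query_device from the ib_dev->attrs copy kept by the core;
 * no hardware round trip is needed for this command.
 */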
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

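/*
 * Open (or create, with O_CREAT) an XRC domain. When cmd.fd names a
 * file, its inode is used to look up an existing domain in the
 * device's xrcd tree; O_EXCL fails if one already exists there.
 */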
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no xrcd for this inode; need O_CREAT to make one */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

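/*
 * Register a user memory region. The start address and HCA virtual
 * address must share the same offset within a page, and ODP access is
 * only allowed when the device advertises on-demand paging support.
 */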
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

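/*
 * Destroy-style commands follow a common pattern: look up the uobject
 * with write (exclusive) access, then let uobj_remove_commit() drive
 * the type-specific teardown.
 */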
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

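/*
 * A completion channel is a file descriptor used to deliver completion
 * events; for this fd-based object the uobject id reported back to
 * user space is the fd number itself.
 */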
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

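/*
 * Common CQ creation path shared by the legacy and extended commands;
 * the cb hook writes the command-specific response back to user space.
 */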
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

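/*
 * Convert a kernel ib_wc into the fixed-layout ib_uverbs_wc ABI
 * structure, narrowing the SLID to 16 bits (with OPA-aware conversion
 * when the port uses OPA addressing) before copying it out.
 */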
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

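	/*
	 * Re-arm the CQ: request an event for the next solicited
	 * completion, or for any next completion, depending on
	 * cmd.solicited_only.
	 */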
	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

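/*
 * Common QP creation path shared by the legacy and extended commands.
 * For IB_QPT_XRC_TGT the pd_handle field actually carries an XRCD
 * handle, and a QP fed by an indirection table has neither SRQ nor
 * receive CQ of its own.
 */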
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

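	/*
	 * ib_create_qp() fully initializes XRC_TGT QPs; for every other
	 * type the uverbs layer fills in the remaining fields and takes
	 * the reference counts itself.
	 */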
	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

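/*
 * Legacy create_qp entry point: the fixed-size command is widened into
 * the extended structure so both ABIs share create_qp() above.
 */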
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + resp_size,
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

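/*
 * Open an existing QP that lives in a shared XRC domain. As with
 * XRC_TGT creation, cmd.pd_handle actually names the XRCD.
 */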
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

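/*
 * Validate and apply a QP state transition. Port numbers and states are
 * checked here; the attribute mask is pruned by modify_qp_mask() before
 * the core ib_modify_qp_with_udata() call.
 */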
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	     cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    cmd->base.qp_state > IB_QPS_ERR) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

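/*
 * The legacy modify command only understands attribute bits up to
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK; newer bits must use the extended
 * command below.
 */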
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
			     in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

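/*
 * Allocate a work request together with its scatter/gather array,
 * rejecting num_sge values that would overflow the size calculation.
 */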
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
		     cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle,
			       file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
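
/*
 * Pull a chain of receive work requests out of the user buffer.  The
 * layout mirrors post_send: wr_count fixed-size ib_uverbs_recv_wr slots
 * followed by a flat array of ib_uverbs_sge entries.  On success the
 * caller owns the returned kmalloc'd chain and must kfree() each node.
 */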
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
		     sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle,
			       file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
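
/*
 * ib_uverbs_post_srq_recv() below mirrors ib_uverbs_post_recv(): the
 * unmarshalled chain is handed to the device's post method, and on
 * failure resp.bad_wr counts work requests up to and including the one
 * the driver reported back, telling userspace where posting stopped.
 */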
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle,
				file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) -
			     sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
			       file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
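
/*
 * Unlike the QP/SRQ teardown paths, destroying an AH has no event
 * counters to report, so uobj_remove_commit() can release the uobject
 * directly without the extra get/put dance.
 */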
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle,
			       file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle,
			       file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
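
/*
 * Flow steering rules can reference other uobjects (currently flow
 * actions).  ib_uflow_resources pins those references for the lifetime
 * of the flow so a referenced action cannot be destroyed out from under
 * an installed rule.
 */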
struct ib_uflow_resources {
	size_t			max;
	size_t			num;
	struct ib_flow_action	*collection[0];
};

static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources =
		kmalloc(sizeof(*resources) +
			num_specs * sizeof(*resources->collection), GFP_KERNEL);

	if (!resources)
		return NULL;

	resources->num = 0;
	resources->max = num_specs;

	return resources;
}

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	for (i = 0; i < uflow_res->num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	kfree(uflow_res);
}

static void flow_resources_add(struct ib_uflow_resources *uflow_res,
			       struct ib_flow_action *action)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	atomic_inc(&action->usecnt);
	uflow_res->collection[uflow_res->num++] = action;
}

static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							ucontext);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res, ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */
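	/*
	 * Example: for an ETH spec the kernel-side filter size is
	 * offsetof(struct ib_flow_eth_filter, real_sz).  A newer userspace
	 * may hand in a larger filter; that is tolerated only when every
	 * byte beyond what this kernel understands is zero.
	 */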
	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
			       file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle,
			       file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
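
	/*
	 * create_wq is an optional device method; drivers that do not
	 * implement it get a clean -EOPNOTSUPP instead of a NULL deref.
	 */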
	if (!pd->device->create_wq) {
		err = -EOPNOTSUPP;
		goto err_put_cq;
	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) +
			  sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) +
			    sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) +
			  sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle,
			       file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	if (!wq->device->modify_wq) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
	uobj_put_obj_read(wq);
	return ret;
}

int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) +
				 sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) +
			    sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;
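
	/*
	 * Userspace passes the WQ handles as a flat __u32 array directly
	 * after the fixed header; each handle is resolved and held as a
	 * read object until the table is committed or creation fails.
	 */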
	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
				       wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	if (!ib_dev->create_rwq_ind_table) {
		err = -EOPNOTSUPP;
		goto err_uobj;
	}
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) +
			  sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_uflow_object *uflow;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	struct ib_uflow_resources *uflow_res;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) +
					 cmd.flow_attr.size, GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle,
			       file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
					   uflow_res);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -=
			((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
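
	/*
	 * All specs parsed and accounted for; hand the assembled flow_attr
	 * to the driver to install the steering rule.
	 */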
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;
	uflow = container_of(uobj, typeof(*uflow), uobject);
	uflow->resources = uflow_res;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
			       file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}
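
	/*
	 * The driver returns a bare ib_srq; the core fills in the common
	 * fields and takes the reference counts before the object becomes
	 * visible to userspace.
	 */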
	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) -
			     sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) -
			     sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
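
/*
 * Only max_wr and srq_limit are modifiable; cmd.attr_mask selects which
 * of the two is applied, and the driver's modify_srq method may consume
 * trailing udata for vendor-specific input.
 */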
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL,
			     in_len - sizeof cmd, out_len);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle,
				file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle,
				file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}

int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length +
			    sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);
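
	/*
	 * Each optional capability block below is appended only when the
	 * caller's output buffer can hold it, and response_length grows
	 * with every block emitted, so older userspace receives a shorter
	 * but still valid reply.
	 */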
	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
	resp.tm_caps.flags = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length +
			    sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size))
		goto end;

	resp.max_dm_size = attr.max_dm_size;
	resp.response_length += sizeof(resp.max_dm_size);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}

int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
			  sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* sanity checks */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle,
			       file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}