/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
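
/*
 * Note on the legacy write() command ABI served by the handlers below (an
 * informal summary inferred from the parsing code in this file, not
 * normative ABI documentation): userspace writes a struct
 * ib_uverbs_cmd_hdr, then the fixed command struct, then any
 * driver-private payload.  buf already points past the header, but in_len
 * still counts it, which is why the driver-private input length passed to
 * ib_uverbs_init_udata() throughout this file is computed as:
 *
 *	in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr)
 *
 * The fixed command struct carries a 64-bit cmd.response pointer to the
 * userspace output buffer; the fixed response struct is written at its
 * start and any driver-private output follows it.
 */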

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
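
/*
 * ib_uverbs_query_device() below does not call into the driver: it reports
 * the attributes cached in ib_dev->attrs at device registration time.
 * Note that the legacy response struct only has room for 32 capability
 * bits, which is why copy_query_dev_fields() above truncates
 * attr->device_cap_flags with lower_32_bits(); the extended query_device
 * command (not part of this excerpt) exists to report the full value.
 */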

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
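
/*
 * The PD handlers above follow the allocation pattern used by most
 * handlers in this file: uobj_alloc() reserves a handle, the driver
 * object is created and hooked up, the response (including the handle)
 * is copied to userspace, and only then does uobj_alloc_commit() make
 * the handle visible.  Any failure unwinds through uobj_alloc_abort(),
 * so a handle is never published for a half-built object.  A rough
 * sketch of the shape (hypothetical names, not an additional API):
 *
 *	uobj = uobj_alloc(type, ucontext);
 *	obj = driver_create(...);           // e.g. ib_dev->alloc_pd()
 *	uobj->object = obj;
 *	if (copy_to_user(resp_ptr, &resp, sizeof(resp)))
 *		goto err_copy;              // destroy obj, then abort uobj
 *	uobj_alloc_commit(uobj);
 */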

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
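
/*
 * XRC domains are shared between processes via a file: everyone who opens
 * the same inode gets the same ib_xrcd.  The rbtree above, keyed by
 * struct inode *, implements that mapping; xrcd_table_insert() takes an
 * inode reference with igrab() and xrcd_table_delete() drops it with
 * iput().  The xrcd->usecnt counter, managed in ib_uverbs_open_xrcd() and
 * ib_uverbs_dealloc_xrcd() below, decides when the underlying XRCD is
 * really destroyed and the table entry removed.
 */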

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no XRCD for this inode; O_CREAT is required */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->dm = NULL;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
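
/*
 * The (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK) check in
 * ib_uverbs_reg_mr() above enforces that the process virtual address and
 * the requested HCA virtual address share the same offset within a page,
 * since registration pins and maps whole pages.  For example, with 4 KiB
 * pages, start = 0x10000123 requires an hca_va whose low 12 bits are also
 * 0x123.  The same check guards the IB_MR_REREG_TRANS path below.
 */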

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
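
/*
 * A completion channel is a file-descriptor-based uobject: for
 * UVERBS_OBJECT_COMP_CHANNEL the id assigned by uobj_alloc() is itself
 * the file descriptor, which is why ib_uverbs_create_comp_channel()
 * below can simply report resp.fd = uobj->id.  CQs created with
 * comp_channel >= 0 look the channel up again via
 * ib_uverbs_lookup_comp_file() and deliver their completion events to
 * its event queue.
 */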

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
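
/*
 * The _ex command pair above illustrates the extended-command convention:
 * the kernel rejects unknown comp_mask/reserved bits, requires the output
 * buffer to cover at least resp.response_length, and reports in
 * resp.response_length how many response bytes it actually understood, so
 * old kernels and new userspace (and vice versa) can interoperate.
 * create_cq() serves both flavors; cmd_sz tells it how much of struct
 * ib_uverbs_ex_create_cq the caller actually provided (the legacy path
 * stops before cmd->flags, so attr.flags is only read on the _ex path).
 */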

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}
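
/*
 * ib_uverbs_poll_cq() above writes its output in two parts at
 * cmd.response: the fixed struct ib_uverbs_poll_cq_resp header (whose
 * count field is filled in last), immediately followed by resp.count
 * struct ib_uverbs_wc entries.  copy_wc_to_user() exists because the
 * kernel's struct ib_wc is not the ABI struct; each completion is
 * translated field by field, including the OPA-to-IB LID conversion.
 */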

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}
		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
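
/*
 * create_qp() above is shared by the legacy and extended create-QP
 * commands; cmd_sz says how much of struct ib_uverbs_ex_create_qp the
 * caller filled in, and the cb callback writes whichever response format
 * the caller expects.  Two special cases worth noting: for IB_QPT_XRC_TGT
 * the cmd->pd_handle field is reused as the XRCD handle (there is no PD),
 * and when a receive-work-queue indirection table is given, SRQ/receive
 * parameters must be zero and a send queue is optional (has_sq).
 */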

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    cmd->base.qp_state > IB_QPS_ERR) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
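
/*
 * Added documentation (grounded in the parsing code below): the legacy
 * post-send command arrives as a packed buffer, roughly:
 *
 *	struct ib_uverbs_post_send cmd;        // fixed header
 *	u8 wqes[cmd.wr_count * cmd.wqe_size];  // one ib_uverbs_send_wr each
 *	struct ib_uverbs_sge sg[cmd.sge_count];
 *
 * Each WR is rebuilt as the opcode-specific kernel structure (ib_ud_wr,
 * ib_rdma_wr, ib_atomic_wr, or a plain ib_send_wr) with its SG list placed
 * in the same allocation, before the chain is handed to the driver's
 * post_send verb.
 */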
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
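
/*
 * Added documentation (grounded in the code below): unmarshal a userspace
 * array of receive WRs, shared by post_recv and post_srq_recv. On success
 * it returns the head of a kmalloc'ed ib_recv_wr chain whose SG lists live
 * in the same allocation as each WR; the caller must kfree() every element.
 * Returns ERR_PTR() on failure.
 */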
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
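
/*
 * Added documentation (grounded in the code above and below): like
 * ib_uverbs_post_recv(), but posts to an SRQ. In both handlers resp.bad_wr
 * reports, as a 1-based index, which WR the driver rejected; 0 means every
 * WR was accepted.
 */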
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr;
	int ret;
	struct ib_udata udata;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
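
/*
 * Added documentation (grounded in the code below): bookkeeping for
 * flow-action objects referenced by a flow. Each action stored in
 * ->collection holds an elevated usecnt for as long as the flow exists;
 * ib_uverbs_flow_resources_free() drops those references when the flow is
 * destroyed or its creation fails.
 */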
struct ib_uflow_resources {
	size_t max;
	size_t num;
	struct ib_flow_action *collection[0];
};

static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources =
		kmalloc(sizeof(*resources) +
			num_specs * sizeof(*resources->collection), GFP_KERNEL);

	if (!resources)
		return NULL;

	resources->num = 0;
	resources->max = num_specs;

	return resources;
}

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	for (i = 0; i < uflow_res->num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	kfree(uflow_res);
}

static void flow_resources_add(struct ib_uflow_resources *uflow_res,
			       struct ib_flow_action *action)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	atomic_inc(&action->usecnt);
	uflow_res->collection[uflow_res->num++] = action;
}

static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							ucontext);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res, ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}
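
/*
 * Added documentation (grounded in the code below): a user flow spec carries
 * a value/mask pair that may be longer than the kernel's filter structure
 * (newer userspace against an older kernel). The trailing excess is
 * tolerated only if it is all zeros; otherwise the spec requests matching
 * the kernel cannot perform and must be rejected.
 */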
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * check may pass even though we would not handle additional new
	 * attributes correctly.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
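
/*
 * Added documentation (grounded in the code below): kern_spec_to_ib_spec()
 * dispatches on the spec type. Values at or above IB_FLOW_SPEC_ACTION_TAG
 * are actions (tag, drop, handle); everything below is a packet-matching
 * filter handed to ib_uverbs_kern_spec_to_ib_spec_filter().
 */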
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
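
/*
 * Added documentation (grounded in the code below): the extended WQ commands
 * follow the usual extensible-command pattern. The caller must supply at
 * least the fields up to and including the last one this kernel knows about
 * (computed with offsetof), any trailing bytes beyond the known structure
 * must be zero, and the response carries response_length so userspace can
 * tell which fields were actually filled in.
 */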
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	if (!pd->device->create_wq) {
		err = -EOPNOTSUPP;
		goto err_put_cq;
	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	if (!wq->device->modify_wq) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
	uobj_put_obj_read(wq);
	return ret;
}
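
/*
 * Added documentation (grounded in the code below): builds an RSS
 * indirection table from 2^log_ind_tbl_size WQ handles. The handle array in
 * the command is u64-aligned, so a table with a single entry still occupies
 * two __u32 slots on the wire. Every referenced WQ is looked up and held for
 * the duration of the call, and its usecnt is raised once the table exists.
 */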
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	if (!ib_dev->create_rwq_ind_table) {
		err = -EOPNOTSUPP;
		goto err_uobj;
	}
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_uobject *uobj;
	int ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
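
/*
 * Added documentation (grounded in the code below): ib_uverbs_ex_create_flow()
 * requires CAP_NET_RAW and extensively validates the user flow attribute
 * (flags, the default types that may not be combined with DONT_TRAP, the
 * spec count and total size) before converting each user spec with
 * kern_spec_to_ib_spec() and attaching the resulting ib_flow_attr to the QP
 * via ib_create_flow().
 */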
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_uflow_object *uflow;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	struct ib_uflow_resources *uflow_res;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
					   uflow_res);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;
	uflow = container_of(uobj, typeof(*uflow), uobject);
	uflow->resources = uflow_res;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
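
/*
 * Added documentation (grounded in the code below): common creation path for
 * all SRQ flavours. Plain SRQs come in through ib_uverbs_create_srq(), which
 * synthesizes an IB_SRQT_BASIC xcmd, while XRC and tag-matching SRQs arrive
 * via ib_uverbs_create_xsrq(); the XRC case additionally pins the XRCD
 * uobject, and TM SRQs carry max_num_tags.
 */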
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
			     out_len);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
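
/*
 * Added documentation (grounded in the code below): the extended
 * query-device response grows field by field. Each optional block is copied
 * only if ucore->outlen shows userspace allocated room for it, and
 * resp.response_length is bumped accordingly, so older consumers simply
 * receive a shorter but still valid response.
 */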
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
	resp.tm_caps.flags = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size))
		goto end;

	resp.max_dm_size = attr.max_dm_size;
	resp.response_length += sizeof(resp.max_dm_size);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}

int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
			  sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* sanity checks */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}