1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/file.h> 37 #include <linux/fs.h> 38 #include <linux/slab.h> 39 #include <linux/sched.h> 40 41 #include <linux/uaccess.h> 42 43 #include <rdma/uverbs_types.h> 44 #include <rdma/uverbs_std_types.h> 45 #include "rdma_core.h" 46 47 #include "uverbs.h" 48 #include "core_priv.h" 49 50 static struct ib_uverbs_completion_event_file * 51 ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context) 52 { 53 struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL, 54 fd, context); 55 struct ib_uobject_file *uobj_file; 56 57 if (IS_ERR(uobj)) 58 return (void *)uobj; 59 60 uverbs_uobject_get(uobj); 61 uobj_put_read(uobj); 62 63 uobj_file = container_of(uobj, struct ib_uobject_file, uobj); 64 return container_of(uobj_file, struct ib_uverbs_completion_event_file, 65 uobj_file); 66 } 67 68 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 69 struct ib_device *ib_dev, 70 const char __user *buf, 71 int in_len, int out_len) 72 { 73 struct ib_uverbs_get_context cmd; 74 struct ib_uverbs_get_context_resp resp; 75 struct ib_udata udata; 76 struct ib_ucontext *ucontext; 77 struct file *filp; 78 struct ib_rdmacg_object cg_obj; 79 int ret; 80 81 if (out_len < sizeof resp) 82 return -ENOSPC; 83 84 if (copy_from_user(&cmd, buf, sizeof cmd)) 85 return -EFAULT; 86 87 mutex_lock(&file->mutex); 88 89 if (file->ucontext) { 90 ret = -EINVAL; 91 goto err; 92 } 93 94 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 95 u64_to_user_ptr(cmd.response) + sizeof(resp), 96 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 97 out_len - sizeof(resp)); 98 99 ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); 100 if (ret) 101 goto err; 102 103 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata); 104 if (IS_ERR(ucontext)) { 105 ret = PTR_ERR(ucontext); 106 goto err_alloc; 107 } 108 109 ucontext->device = ib_dev; 110 ucontext->cg_obj = cg_obj; 111 /* ufile is required when some objects are released */ 112 ucontext->ufile = file; 113 uverbs_initialize_ucontext(ucontext); 114 115 rcu_read_lock(); 116 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); 117 rcu_read_unlock(); 118 ucontext->closing = 0; 119 120 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 121 ucontext->umem_tree = RB_ROOT_CACHED; 122 init_rwsem(&ucontext->umem_rwsem); 123 ucontext->odp_mrs_count = 0; 124 INIT_LIST_HEAD(&ucontext->no_private_counters); 125 126 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)) 127 ucontext->invalidate_range = NULL; 128 129 #endif 130 131 resp.num_comp_vectors = file->device->num_comp_vectors; 132 133 ret = get_unused_fd_flags(O_CLOEXEC); 134 if (ret < 0) 135 goto err_free; 136 resp.async_fd = ret; 137 138 filp = ib_uverbs_alloc_async_event_file(file, ib_dev); 139 if (IS_ERR(filp)) { 140 ret = PTR_ERR(filp); 141 goto err_fd; 142 } 143 144 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 145 ret = -EFAULT; 146 goto err_file; 147 } 148 149 file->ucontext = ucontext; 150 151 fd_install(resp.async_fd, filp); 152 153 mutex_unlock(&file->mutex); 154 155 return in_len; 156 157 err_file: 158 ib_uverbs_free_async_event_file(file); 159 fput(filp); 160 161 err_fd: 162 put_unused_fd(resp.async_fd); 163 164 err_free: 165 put_pid(ucontext->tgid); 166 ib_dev->dealloc_ucontext(ucontext); 167 168 err_alloc: 169 ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); 170 171 err: 172 mutex_unlock(&file->mutex); 173 return ret; 174 } 175 176 static void copy_query_dev_fields(struct ib_uverbs_file *file, 177 struct ib_device *ib_dev, 178 struct ib_uverbs_query_device_resp *resp, 179 struct ib_device_attr *attr) 180 { 181 resp->fw_ver = attr->fw_ver; 182 resp->node_guid = ib_dev->node_guid; 183 resp->sys_image_guid = attr->sys_image_guid; 184 resp->max_mr_size = attr->max_mr_size; 185 resp->page_size_cap = attr->page_size_cap; 186 resp->vendor_id = attr->vendor_id; 187 resp->vendor_part_id = attr->vendor_part_id; 188 resp->hw_ver = attr->hw_ver; 189 resp->max_qp = attr->max_qp; 190 resp->max_qp_wr = attr->max_qp_wr; 191 resp->device_cap_flags = lower_32_bits(attr->device_cap_flags); 192 resp->max_sge = attr->max_sge; 193 resp->max_sge_rd = attr->max_sge_rd; 194 resp->max_cq = attr->max_cq; 195 resp->max_cqe = attr->max_cqe; 196 resp->max_mr = attr->max_mr; 197 resp->max_pd = attr->max_pd; 198 resp->max_qp_rd_atom = attr->max_qp_rd_atom; 199 resp->max_ee_rd_atom = attr->max_ee_rd_atom; 200 resp->max_res_rd_atom = attr->max_res_rd_atom; 201 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; 202 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; 203 resp->atomic_cap = attr->atomic_cap; 204 resp->max_ee = attr->max_ee; 205 resp->max_rdd = attr->max_rdd; 206 resp->max_mw = attr->max_mw; 207 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; 208 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; 209 resp->max_mcast_grp = attr->max_mcast_grp; 210 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; 211 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; 212 resp->max_ah = attr->max_ah; 213 resp->max_fmr = attr->max_fmr; 214 resp->max_map_per_fmr = attr->max_map_per_fmr; 215 resp->max_srq = attr->max_srq; 216 resp->max_srq_wr = attr->max_srq_wr; 217 resp->max_srq_sge = attr->max_srq_sge; 218 resp->max_pkeys = attr->max_pkeys; 219 resp->local_ca_ack_delay = attr->local_ca_ack_delay; 220 resp->phys_port_cnt = ib_dev->phys_port_cnt; 221 } 222 223 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 224 struct ib_device *ib_dev, 225 const char __user *buf, 226 int in_len, int out_len) 227 { 228 struct ib_uverbs_query_device cmd; 229 struct ib_uverbs_query_device_resp resp; 230 231 if (out_len < sizeof resp) 232 return -ENOSPC; 233 234 if (copy_from_user(&cmd, buf, sizeof cmd)) 235 return -EFAULT; 236 237 memset(&resp, 0, sizeof resp); 238 copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs); 239 240 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 241 return -EFAULT; 242 243 return in_len; 244 } 245 246 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 247 struct ib_device *ib_dev, 248 const char __user *buf, 249 int in_len, int out_len) 250 { 251 struct ib_uverbs_query_port cmd; 252 struct ib_uverbs_query_port_resp resp; 253 struct ib_port_attr attr; 254 int ret; 255 256 if (out_len < sizeof resp) 257 return -ENOSPC; 258 259 if (copy_from_user(&cmd, buf, sizeof cmd)) 260 return -EFAULT; 261 262 ret = ib_query_port(ib_dev, cmd.port_num, &attr); 263 if (ret) 264 return ret; 265 266 memset(&resp, 0, sizeof resp); 267 268 resp.state = attr.state; 269 resp.max_mtu = attr.max_mtu; 270 resp.active_mtu = attr.active_mtu; 271 resp.gid_tbl_len = attr.gid_tbl_len; 272 resp.port_cap_flags = attr.port_cap_flags; 273 resp.max_msg_sz = attr.max_msg_sz; 274 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 275 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 276 resp.pkey_tbl_len = attr.pkey_tbl_len; 277 278 if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) { 279 resp.lid = OPA_TO_IB_UCAST_LID(attr.lid); 280 resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid); 281 } else { 282 resp.lid = ib_lid_cpu16(attr.lid); 283 resp.sm_lid = ib_lid_cpu16(attr.sm_lid); 284 } 285 resp.lmc = attr.lmc; 286 resp.max_vl_num = attr.max_vl_num; 287 resp.sm_sl = attr.sm_sl; 288 resp.subnet_timeout = attr.subnet_timeout; 289 resp.init_type_reply = attr.init_type_reply; 290 resp.active_width = attr.active_width; 291 resp.active_speed = attr.active_speed; 292 resp.phys_state = attr.phys_state; 293 resp.link_layer = rdma_port_get_link_layer(ib_dev, 294 cmd.port_num); 295 296 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 297 return -EFAULT; 298 299 return in_len; 300 } 301 302 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 303 struct ib_device *ib_dev, 304 const char __user *buf, 305 int in_len, int out_len) 306 { 307 struct ib_uverbs_alloc_pd cmd; 308 struct ib_uverbs_alloc_pd_resp resp; 309 struct ib_udata udata; 310 struct ib_uobject *uobj; 311 struct ib_pd *pd; 312 int ret; 313 314 if (out_len < sizeof resp) 315 return -ENOSPC; 316 317 if (copy_from_user(&cmd, buf, sizeof cmd)) 318 return -EFAULT; 319 320 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 321 u64_to_user_ptr(cmd.response) + sizeof(resp), 322 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 323 out_len - sizeof(resp)); 324 325 uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext); 326 if (IS_ERR(uobj)) 327 return PTR_ERR(uobj); 328 329 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata); 330 if (IS_ERR(pd)) { 331 ret = PTR_ERR(pd); 332 goto err; 333 } 334 335 pd->device = ib_dev; 336 pd->uobject = uobj; 337 pd->__internal_mr = NULL; 338 atomic_set(&pd->usecnt, 0); 339 340 uobj->object = pd; 341 memset(&resp, 0, sizeof resp); 342 resp.pd_handle = uobj->id; 343 pd->res.type = RDMA_RESTRACK_PD; 344 rdma_restrack_add(&pd->res); 345 346 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 347 ret = -EFAULT; 348 goto err_copy; 349 } 350 351 uobj_alloc_commit(uobj); 352 353 return in_len; 354 355 err_copy: 356 ib_dealloc_pd(pd); 357 358 err: 359 uobj_alloc_abort(uobj); 360 return ret; 361 } 362 363 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 364 struct ib_device *ib_dev, 365 const char __user *buf, 366 int in_len, int out_len) 367 { 368 struct ib_uverbs_dealloc_pd cmd; 369 struct ib_uobject *uobj; 370 int ret; 371 372 if (copy_from_user(&cmd, buf, sizeof cmd)) 373 return -EFAULT; 374 375 uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle, 376 file->ucontext); 377 if (IS_ERR(uobj)) 378 return PTR_ERR(uobj); 379 380 ret = uobj_remove_commit(uobj); 381 382 return ret ?: in_len; 383 } 384 385 struct xrcd_table_entry { 386 struct rb_node node; 387 struct ib_xrcd *xrcd; 388 struct inode *inode; 389 }; 390 391 static int xrcd_table_insert(struct ib_uverbs_device *dev, 392 struct inode *inode, 393 struct ib_xrcd *xrcd) 394 { 395 struct xrcd_table_entry *entry, *scan; 396 struct rb_node **p = &dev->xrcd_tree.rb_node; 397 struct rb_node *parent = NULL; 398 399 entry = kmalloc(sizeof *entry, GFP_KERNEL); 400 if (!entry) 401 return -ENOMEM; 402 403 entry->xrcd = xrcd; 404 entry->inode = inode; 405 406 while (*p) { 407 parent = *p; 408 scan = rb_entry(parent, struct xrcd_table_entry, node); 409 410 if (inode < scan->inode) { 411 p = &(*p)->rb_left; 412 } else if (inode > scan->inode) { 413 p = &(*p)->rb_right; 414 } else { 415 kfree(entry); 416 return -EEXIST; 417 } 418 } 419 420 rb_link_node(&entry->node, parent, p); 421 rb_insert_color(&entry->node, &dev->xrcd_tree); 422 igrab(inode); 423 return 0; 424 } 425 426 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev, 427 struct inode *inode) 428 { 429 struct xrcd_table_entry *entry; 430 struct rb_node *p = dev->xrcd_tree.rb_node; 431 432 while (p) { 433 entry = rb_entry(p, struct xrcd_table_entry, node); 434 435 if (inode < entry->inode) 436 p = p->rb_left; 437 else if (inode > entry->inode) 438 p = p->rb_right; 439 else 440 return entry; 441 } 442 443 return NULL; 444 } 445 446 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode) 447 { 448 struct xrcd_table_entry *entry; 449 450 entry = xrcd_table_search(dev, inode); 451 if (!entry) 452 return NULL; 453 454 return entry->xrcd; 455 } 456 457 static void xrcd_table_delete(struct ib_uverbs_device *dev, 458 struct inode *inode) 459 { 460 struct xrcd_table_entry *entry; 461 462 entry = xrcd_table_search(dev, inode); 463 if (entry) { 464 iput(inode); 465 rb_erase(&entry->node, &dev->xrcd_tree); 466 kfree(entry); 467 } 468 } 469 470 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, 471 struct ib_device *ib_dev, 472 const char __user *buf, int in_len, 473 int out_len) 474 { 475 struct ib_uverbs_open_xrcd cmd; 476 struct ib_uverbs_open_xrcd_resp resp; 477 struct ib_udata udata; 478 struct ib_uxrcd_object *obj; 479 struct ib_xrcd *xrcd = NULL; 480 struct fd f = {NULL, 0}; 481 struct inode *inode = NULL; 482 int ret = 0; 483 int new_xrcd = 0; 484 485 if (out_len < sizeof resp) 486 return -ENOSPC; 487 488 if (copy_from_user(&cmd, buf, sizeof cmd)) 489 return -EFAULT; 490 491 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 492 u64_to_user_ptr(cmd.response) + sizeof(resp), 493 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 494 out_len - sizeof(resp)); 495 496 mutex_lock(&file->device->xrcd_tree_mutex); 497 498 if (cmd.fd != -1) { 499 /* search for file descriptor */ 500 f = fdget(cmd.fd); 501 if (!f.file) { 502 ret = -EBADF; 503 goto err_tree_mutex_unlock; 504 } 505 506 inode = file_inode(f.file); 507 xrcd = find_xrcd(file->device, inode); 508 if (!xrcd && !(cmd.oflags & O_CREAT)) { 509 /* no file descriptor. Need CREATE flag */ 510 ret = -EAGAIN; 511 goto err_tree_mutex_unlock; 512 } 513 514 if (xrcd && cmd.oflags & O_EXCL) { 515 ret = -EINVAL; 516 goto err_tree_mutex_unlock; 517 } 518 } 519 520 obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, 521 file->ucontext); 522 if (IS_ERR(obj)) { 523 ret = PTR_ERR(obj); 524 goto err_tree_mutex_unlock; 525 } 526 527 if (!xrcd) { 528 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata); 529 if (IS_ERR(xrcd)) { 530 ret = PTR_ERR(xrcd); 531 goto err; 532 } 533 534 xrcd->inode = inode; 535 xrcd->device = ib_dev; 536 atomic_set(&xrcd->usecnt, 0); 537 mutex_init(&xrcd->tgt_qp_mutex); 538 INIT_LIST_HEAD(&xrcd->tgt_qp_list); 539 new_xrcd = 1; 540 } 541 542 atomic_set(&obj->refcnt, 0); 543 obj->uobject.object = xrcd; 544 memset(&resp, 0, sizeof resp); 545 resp.xrcd_handle = obj->uobject.id; 546 547 if (inode) { 548 if (new_xrcd) { 549 /* create new inode/xrcd table entry */ 550 ret = xrcd_table_insert(file->device, inode, xrcd); 551 if (ret) 552 goto err_dealloc_xrcd; 553 } 554 atomic_inc(&xrcd->usecnt); 555 } 556 557 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 558 ret = -EFAULT; 559 goto err_copy; 560 } 561 562 if (f.file) 563 fdput(f); 564 565 mutex_unlock(&file->device->xrcd_tree_mutex); 566 567 uobj_alloc_commit(&obj->uobject); 568 569 return in_len; 570 571 err_copy: 572 if (inode) { 573 if (new_xrcd) 574 xrcd_table_delete(file->device, inode); 575 atomic_dec(&xrcd->usecnt); 576 } 577 578 err_dealloc_xrcd: 579 ib_dealloc_xrcd(xrcd); 580 581 err: 582 uobj_alloc_abort(&obj->uobject); 583 584 err_tree_mutex_unlock: 585 if (f.file) 586 fdput(f); 587 588 mutex_unlock(&file->device->xrcd_tree_mutex); 589 590 return ret; 591 } 592 593 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, 594 struct ib_device *ib_dev, 595 const char __user *buf, int in_len, 596 int out_len) 597 { 598 struct ib_uverbs_close_xrcd cmd; 599 struct ib_uobject *uobj; 600 int ret = 0; 601 602 if (copy_from_user(&cmd, buf, sizeof cmd)) 603 return -EFAULT; 604 605 uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, 606 file->ucontext); 607 if (IS_ERR(uobj)) 608 return PTR_ERR(uobj); 609 610 ret = uobj_remove_commit(uobj); 611 return ret ?: in_len; 612 } 613 614 int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, 615 struct ib_xrcd *xrcd, 616 enum rdma_remove_reason why) 617 { 618 struct inode *inode; 619 int ret; 620 621 inode = xrcd->inode; 622 if (inode && !atomic_dec_and_test(&xrcd->usecnt)) 623 return 0; 624 625 ret = ib_dealloc_xrcd(xrcd); 626 627 if (why == RDMA_REMOVE_DESTROY && ret) 628 atomic_inc(&xrcd->usecnt); 629 else if (inode) 630 xrcd_table_delete(dev, inode); 631 632 return ret; 633 } 634 635 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, 636 struct ib_device *ib_dev, 637 const char __user *buf, int in_len, 638 int out_len) 639 { 640 struct ib_uverbs_reg_mr cmd; 641 struct ib_uverbs_reg_mr_resp resp; 642 struct ib_udata udata; 643 struct ib_uobject *uobj; 644 struct ib_pd *pd; 645 struct ib_mr *mr; 646 int ret; 647 648 if (out_len < sizeof resp) 649 return -ENOSPC; 650 651 if (copy_from_user(&cmd, buf, sizeof cmd)) 652 return -EFAULT; 653 654 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 655 u64_to_user_ptr(cmd.response) + sizeof(resp), 656 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 657 out_len - sizeof(resp)); 658 659 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 660 return -EINVAL; 661 662 ret = ib_check_mr_access(cmd.access_flags); 663 if (ret) 664 return ret; 665 666 uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext); 667 if (IS_ERR(uobj)) 668 return PTR_ERR(uobj); 669 670 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext); 671 if (!pd) { 672 ret = -EINVAL; 673 goto err_free; 674 } 675 676 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) { 677 if (!(pd->device->attrs.device_cap_flags & 678 IB_DEVICE_ON_DEMAND_PAGING)) { 679 pr_debug("ODP support not available\n"); 680 ret = -EINVAL; 681 goto err_put; 682 } 683 } 684 685 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, 686 cmd.access_flags, &udata); 687 if (IS_ERR(mr)) { 688 ret = PTR_ERR(mr); 689 goto err_put; 690 } 691 692 mr->device = pd->device; 693 mr->pd = pd; 694 mr->dm = NULL; 695 mr->uobject = uobj; 696 atomic_inc(&pd->usecnt); 697 mr->res.type = RDMA_RESTRACK_MR; 698 rdma_restrack_add(&mr->res); 699 700 uobj->object = mr; 701 702 memset(&resp, 0, sizeof resp); 703 resp.lkey = mr->lkey; 704 resp.rkey = mr->rkey; 705 resp.mr_handle = uobj->id; 706 707 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 708 ret = -EFAULT; 709 goto err_copy; 710 } 711 712 uobj_put_obj_read(pd); 713 714 uobj_alloc_commit(uobj); 715 716 return in_len; 717 718 err_copy: 719 ib_dereg_mr(mr); 720 721 err_put: 722 uobj_put_obj_read(pd); 723 724 err_free: 725 uobj_alloc_abort(uobj); 726 return ret; 727 } 728 729 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, 730 struct ib_device *ib_dev, 731 const char __user *buf, int in_len, 732 int out_len) 733 { 734 struct ib_uverbs_rereg_mr cmd; 735 struct ib_uverbs_rereg_mr_resp resp; 736 struct ib_udata udata; 737 struct ib_pd *pd = NULL; 738 struct ib_mr *mr; 739 struct ib_pd *old_pd; 740 int ret; 741 struct ib_uobject *uobj; 742 743 if (out_len < sizeof(resp)) 744 return -ENOSPC; 745 746 if (copy_from_user(&cmd, buf, sizeof(cmd))) 747 return -EFAULT; 748 749 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 750 u64_to_user_ptr(cmd.response) + sizeof(resp), 751 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 752 out_len - sizeof(resp)); 753 754 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags) 755 return -EINVAL; 756 757 if ((cmd.flags & IB_MR_REREG_TRANS) && 758 (!cmd.start || !cmd.hca_va || 0 >= cmd.length || 759 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))) 760 return -EINVAL; 761 762 uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, 763 file->ucontext); 764 if (IS_ERR(uobj)) 765 return PTR_ERR(uobj); 766 767 mr = uobj->object; 768 769 if (mr->dm) { 770 ret = -EINVAL; 771 goto put_uobjs; 772 } 773 774 if (cmd.flags & IB_MR_REREG_ACCESS) { 775 ret = ib_check_mr_access(cmd.access_flags); 776 if (ret) 777 goto put_uobjs; 778 } 779 780 if (cmd.flags & IB_MR_REREG_PD) { 781 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext); 782 if (!pd) { 783 ret = -EINVAL; 784 goto put_uobjs; 785 } 786 } 787 788 old_pd = mr->pd; 789 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, 790 cmd.length, cmd.hca_va, 791 cmd.access_flags, pd, &udata); 792 if (!ret) { 793 if (cmd.flags & IB_MR_REREG_PD) { 794 atomic_inc(&pd->usecnt); 795 mr->pd = pd; 796 atomic_dec(&old_pd->usecnt); 797 } 798 } else { 799 goto put_uobj_pd; 800 } 801 802 memset(&resp, 0, sizeof(resp)); 803 resp.lkey = mr->lkey; 804 resp.rkey = mr->rkey; 805 806 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) 807 ret = -EFAULT; 808 else 809 ret = in_len; 810 811 put_uobj_pd: 812 if (cmd.flags & IB_MR_REREG_PD) 813 uobj_put_obj_read(pd); 814 815 put_uobjs: 816 uobj_put_write(uobj); 817 818 return ret; 819 } 820 821 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 822 struct ib_device *ib_dev, 823 const char __user *buf, int in_len, 824 int out_len) 825 { 826 struct ib_uverbs_dereg_mr cmd; 827 struct ib_uobject *uobj; 828 int ret = -EINVAL; 829 830 if (copy_from_user(&cmd, buf, sizeof cmd)) 831 return -EFAULT; 832 833 uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, 834 file->ucontext); 835 if (IS_ERR(uobj)) 836 return PTR_ERR(uobj); 837 838 ret = uobj_remove_commit(uobj); 839 840 return ret ?: in_len; 841 } 842 843 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file, 844 struct ib_device *ib_dev, 845 const char __user *buf, int in_len, 846 int out_len) 847 { 848 struct ib_uverbs_alloc_mw cmd; 849 struct ib_uverbs_alloc_mw_resp resp; 850 struct ib_uobject *uobj; 851 struct ib_pd *pd; 852 struct ib_mw *mw; 853 struct ib_udata udata; 854 int ret; 855 856 if (out_len < sizeof(resp)) 857 return -ENOSPC; 858 859 if (copy_from_user(&cmd, buf, sizeof(cmd))) 860 return -EFAULT; 861 862 uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext); 863 if (IS_ERR(uobj)) 864 return PTR_ERR(uobj); 865 866 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext); 867 if (!pd) { 868 ret = -EINVAL; 869 goto err_free; 870 } 871 872 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 873 u64_to_user_ptr(cmd.response) + sizeof(resp), 874 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 875 out_len - sizeof(resp)); 876 877 mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata); 878 if (IS_ERR(mw)) { 879 ret = PTR_ERR(mw); 880 goto err_put; 881 } 882 883 mw->device = pd->device; 884 mw->pd = pd; 885 mw->uobject = uobj; 886 atomic_inc(&pd->usecnt); 887 888 uobj->object = mw; 889 890 memset(&resp, 0, sizeof(resp)); 891 resp.rkey = mw->rkey; 892 resp.mw_handle = uobj->id; 893 894 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { 895 ret = -EFAULT; 896 goto err_copy; 897 } 898 899 uobj_put_obj_read(pd); 900 uobj_alloc_commit(uobj); 901 902 return in_len; 903 904 err_copy: 905 uverbs_dealloc_mw(mw); 906 err_put: 907 uobj_put_obj_read(pd); 908 err_free: 909 uobj_alloc_abort(uobj); 910 return ret; 911 } 912 913 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file, 914 struct ib_device *ib_dev, 915 const char __user *buf, int in_len, 916 int out_len) 917 { 918 struct ib_uverbs_dealloc_mw cmd; 919 struct ib_uobject *uobj; 920 int ret = -EINVAL; 921 922 if (copy_from_user(&cmd, buf, sizeof(cmd))) 923 return -EFAULT; 924 925 uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle, 926 file->ucontext); 927 if (IS_ERR(uobj)) 928 return PTR_ERR(uobj); 929 930 ret = uobj_remove_commit(uobj); 931 return ret ?: in_len; 932 } 933 934 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 935 struct ib_device *ib_dev, 936 const char __user *buf, int in_len, 937 int out_len) 938 { 939 struct ib_uverbs_create_comp_channel cmd; 940 struct ib_uverbs_create_comp_channel_resp resp; 941 struct ib_uobject *uobj; 942 struct ib_uverbs_completion_event_file *ev_file; 943 944 if (out_len < sizeof resp) 945 return -ENOSPC; 946 947 if (copy_from_user(&cmd, buf, sizeof cmd)) 948 return -EFAULT; 949 950 uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext); 951 if (IS_ERR(uobj)) 952 return PTR_ERR(uobj); 953 954 resp.fd = uobj->id; 955 956 ev_file = container_of(uobj, struct ib_uverbs_completion_event_file, 957 uobj_file.uobj); 958 ib_uverbs_init_event_queue(&ev_file->ev_queue); 959 960 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 961 uobj_alloc_abort(uobj); 962 return -EFAULT; 963 } 964 965 uobj_alloc_commit(uobj); 966 return in_len; 967 } 968 969 static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, 970 struct ib_device *ib_dev, 971 struct ib_udata *ucore, 972 struct ib_udata *uhw, 973 struct ib_uverbs_ex_create_cq *cmd, 974 size_t cmd_sz, 975 int (*cb)(struct ib_uverbs_file *file, 976 struct ib_ucq_object *obj, 977 struct ib_uverbs_ex_create_cq_resp *resp, 978 struct ib_udata *udata, 979 void *context), 980 void *context) 981 { 982 struct ib_ucq_object *obj; 983 struct ib_uverbs_completion_event_file *ev_file = NULL; 984 struct ib_cq *cq; 985 int ret; 986 struct ib_uverbs_ex_create_cq_resp resp; 987 struct ib_cq_init_attr attr = {}; 988 989 if (!ib_dev->create_cq) 990 return ERR_PTR(-EOPNOTSUPP); 991 992 if (cmd->comp_vector >= file->device->num_comp_vectors) 993 return ERR_PTR(-EINVAL); 994 995 obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, 996 file->ucontext); 997 if (IS_ERR(obj)) 998 return obj; 999 1000 if (cmd->comp_channel >= 0) { 1001 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, 1002 file->ucontext); 1003 if (IS_ERR(ev_file)) { 1004 ret = PTR_ERR(ev_file); 1005 goto err; 1006 } 1007 } 1008 1009 obj->uobject.user_handle = cmd->user_handle; 1010 obj->uverbs_file = file; 1011 obj->comp_events_reported = 0; 1012 obj->async_events_reported = 0; 1013 INIT_LIST_HEAD(&obj->comp_list); 1014 INIT_LIST_HEAD(&obj->async_list); 1015 1016 attr.cqe = cmd->cqe; 1017 attr.comp_vector = cmd->comp_vector; 1018 1019 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags)) 1020 attr.flags = cmd->flags; 1021 1022 cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw); 1023 if (IS_ERR(cq)) { 1024 ret = PTR_ERR(cq); 1025 goto err_file; 1026 } 1027 1028 cq->device = ib_dev; 1029 cq->uobject = &obj->uobject; 1030 cq->comp_handler = ib_uverbs_comp_handler; 1031 cq->event_handler = ib_uverbs_cq_event_handler; 1032 cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; 1033 atomic_set(&cq->usecnt, 0); 1034 1035 obj->uobject.object = cq; 1036 memset(&resp, 0, sizeof resp); 1037 resp.base.cq_handle = obj->uobject.id; 1038 resp.base.cqe = cq->cqe; 1039 1040 resp.response_length = offsetof(typeof(resp), response_length) + 1041 sizeof(resp.response_length); 1042 1043 cq->res.type = RDMA_RESTRACK_CQ; 1044 rdma_restrack_add(&cq->res); 1045 1046 ret = cb(file, obj, &resp, ucore, context); 1047 if (ret) 1048 goto err_cb; 1049 1050 uobj_alloc_commit(&obj->uobject); 1051 return obj; 1052 1053 err_cb: 1054 ib_destroy_cq(cq); 1055 1056 err_file: 1057 if (ev_file) 1058 ib_uverbs_release_ucq(file, ev_file, obj); 1059 1060 err: 1061 uobj_alloc_abort(&obj->uobject); 1062 1063 return ERR_PTR(ret); 1064 } 1065 1066 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file, 1067 struct ib_ucq_object *obj, 1068 struct ib_uverbs_ex_create_cq_resp *resp, 1069 struct ib_udata *ucore, void *context) 1070 { 1071 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base))) 1072 return -EFAULT; 1073 1074 return 0; 1075 } 1076 1077 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, 1078 struct ib_device *ib_dev, 1079 const char __user *buf, int in_len, 1080 int out_len) 1081 { 1082 struct ib_uverbs_create_cq cmd; 1083 struct ib_uverbs_ex_create_cq cmd_ex; 1084 struct ib_uverbs_create_cq_resp resp; 1085 struct ib_udata ucore; 1086 struct ib_udata uhw; 1087 struct ib_ucq_object *obj; 1088 1089 if (out_len < sizeof(resp)) 1090 return -ENOSPC; 1091 1092 if (copy_from_user(&cmd, buf, sizeof(cmd))) 1093 return -EFAULT; 1094 1095 ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response), 1096 sizeof(cmd), sizeof(resp)); 1097 1098 ib_uverbs_init_udata(&uhw, buf + sizeof(cmd), 1099 u64_to_user_ptr(cmd.response) + sizeof(resp), 1100 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 1101 out_len - sizeof(resp)); 1102 1103 memset(&cmd_ex, 0, sizeof(cmd_ex)); 1104 cmd_ex.user_handle = cmd.user_handle; 1105 cmd_ex.cqe = cmd.cqe; 1106 cmd_ex.comp_vector = cmd.comp_vector; 1107 cmd_ex.comp_channel = cmd.comp_channel; 1108 1109 obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex, 1110 offsetof(typeof(cmd_ex), comp_channel) + 1111 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb, 1112 NULL); 1113 1114 if (IS_ERR(obj)) 1115 return PTR_ERR(obj); 1116 1117 return in_len; 1118 } 1119 1120 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file, 1121 struct ib_ucq_object *obj, 1122 struct ib_uverbs_ex_create_cq_resp *resp, 1123 struct ib_udata *ucore, void *context) 1124 { 1125 if (ib_copy_to_udata(ucore, resp, resp->response_length)) 1126 return -EFAULT; 1127 1128 return 0; 1129 } 1130 1131 int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file, 1132 struct ib_device *ib_dev, 1133 struct ib_udata *ucore, 1134 struct ib_udata *uhw) 1135 { 1136 struct ib_uverbs_ex_create_cq_resp resp; 1137 struct ib_uverbs_ex_create_cq cmd; 1138 struct ib_ucq_object *obj; 1139 int err; 1140 1141 if (ucore->inlen < sizeof(cmd)) 1142 return -EINVAL; 1143 1144 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 1145 if (err) 1146 return err; 1147 1148 if (cmd.comp_mask) 1149 return -EINVAL; 1150 1151 if (cmd.reserved) 1152 return -EINVAL; 1153 1154 if (ucore->outlen < (offsetof(typeof(resp), response_length) + 1155 sizeof(resp.response_length))) 1156 return -ENOSPC; 1157 1158 obj = create_cq(file, ib_dev, ucore, uhw, &cmd, 1159 min(ucore->inlen, sizeof(cmd)), 1160 ib_uverbs_ex_create_cq_cb, NULL); 1161 1162 return PTR_ERR_OR_ZERO(obj); 1163 } 1164 1165 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, 1166 struct ib_device *ib_dev, 1167 const char __user *buf, int in_len, 1168 int out_len) 1169 { 1170 struct ib_uverbs_resize_cq cmd; 1171 struct ib_uverbs_resize_cq_resp resp = {}; 1172 struct ib_udata udata; 1173 struct ib_cq *cq; 1174 int ret = -EINVAL; 1175 1176 if (copy_from_user(&cmd, buf, sizeof cmd)) 1177 return -EFAULT; 1178 1179 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 1180 u64_to_user_ptr(cmd.response) + sizeof(resp), 1181 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 1182 out_len - sizeof(resp)); 1183 1184 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext); 1185 if (!cq) 1186 return -EINVAL; 1187 1188 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); 1189 if (ret) 1190 goto out; 1191 1192 resp.cqe = cq->cqe; 1193 1194 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe)) 1195 ret = -EFAULT; 1196 1197 out: 1198 uobj_put_obj_read(cq); 1199 1200 return ret ? ret : in_len; 1201 } 1202 1203 static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest, 1204 struct ib_wc *wc) 1205 { 1206 struct ib_uverbs_wc tmp; 1207 1208 tmp.wr_id = wc->wr_id; 1209 tmp.status = wc->status; 1210 tmp.opcode = wc->opcode; 1211 tmp.vendor_err = wc->vendor_err; 1212 tmp.byte_len = wc->byte_len; 1213 tmp.ex.imm_data = wc->ex.imm_data; 1214 tmp.qp_num = wc->qp->qp_num; 1215 tmp.src_qp = wc->src_qp; 1216 tmp.wc_flags = wc->wc_flags; 1217 tmp.pkey_index = wc->pkey_index; 1218 if (rdma_cap_opa_ah(ib_dev, wc->port_num)) 1219 tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid); 1220 else 1221 tmp.slid = ib_lid_cpu16(wc->slid); 1222 tmp.sl = wc->sl; 1223 tmp.dlid_path_bits = wc->dlid_path_bits; 1224 tmp.port_num = wc->port_num; 1225 tmp.reserved = 0; 1226 1227 if (copy_to_user(dest, &tmp, sizeof tmp)) 1228 return -EFAULT; 1229 1230 return 0; 1231 } 1232 1233 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 1234 struct ib_device *ib_dev, 1235 const char __user *buf, int in_len, 1236 int out_len) 1237 { 1238 struct ib_uverbs_poll_cq cmd; 1239 struct ib_uverbs_poll_cq_resp resp; 1240 u8 __user *header_ptr; 1241 u8 __user *data_ptr; 1242 struct ib_cq *cq; 1243 struct ib_wc wc; 1244 int ret; 1245 1246 if (copy_from_user(&cmd, buf, sizeof cmd)) 1247 return -EFAULT; 1248 1249 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext); 1250 if (!cq) 1251 return -EINVAL; 1252 1253 /* we copy a struct ib_uverbs_poll_cq_resp to user space */ 1254 header_ptr = u64_to_user_ptr(cmd.response); 1255 data_ptr = header_ptr + sizeof resp; 1256 1257 memset(&resp, 0, sizeof resp); 1258 while (resp.count < cmd.ne) { 1259 ret = ib_poll_cq(cq, 1, &wc); 1260 if (ret < 0) 1261 goto out_put; 1262 if (!ret) 1263 break; 1264 1265 ret = copy_wc_to_user(ib_dev, data_ptr, &wc); 1266 if (ret) 1267 goto out_put; 1268 1269 data_ptr += sizeof(struct ib_uverbs_wc); 1270 ++resp.count; 1271 } 1272 1273 if (copy_to_user(header_ptr, &resp, sizeof resp)) { 1274 ret = -EFAULT; 1275 goto out_put; 1276 } 1277 1278 ret = in_len; 1279 1280 out_put: 1281 uobj_put_obj_read(cq); 1282 return ret; 1283 } 1284 1285 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, 1286 struct ib_device *ib_dev, 1287 const char __user *buf, int in_len, 1288 int out_len) 1289 { 1290 struct ib_uverbs_req_notify_cq cmd; 1291 struct ib_cq *cq; 1292 1293 if (copy_from_user(&cmd, buf, sizeof cmd)) 1294 return -EFAULT; 1295 1296 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext); 1297 if (!cq) 1298 return -EINVAL; 1299 1300 ib_req_notify_cq(cq, cmd.solicited_only ? 1301 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 1302 1303 uobj_put_obj_read(cq); 1304 1305 return in_len; 1306 } 1307 1308 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, 1309 struct ib_device *ib_dev, 1310 const char __user *buf, int in_len, 1311 int out_len) 1312 { 1313 struct ib_uverbs_destroy_cq cmd; 1314 struct ib_uverbs_destroy_cq_resp resp; 1315 struct ib_uobject *uobj; 1316 struct ib_cq *cq; 1317 struct ib_ucq_object *obj; 1318 int ret = -EINVAL; 1319 1320 if (copy_from_user(&cmd, buf, sizeof cmd)) 1321 return -EFAULT; 1322 1323 uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle, 1324 file->ucontext); 1325 if (IS_ERR(uobj)) 1326 return PTR_ERR(uobj); 1327 1328 /* 1329 * Make sure we don't free the memory in remove_commit as we still 1330 * needs the uobject memory to create the response. 1331 */ 1332 uverbs_uobject_get(uobj); 1333 cq = uobj->object; 1334 obj = container_of(cq->uobject, struct ib_ucq_object, uobject); 1335 1336 memset(&resp, 0, sizeof(resp)); 1337 1338 ret = uobj_remove_commit(uobj); 1339 if (ret) { 1340 uverbs_uobject_put(uobj); 1341 return ret; 1342 } 1343 1344 resp.comp_events_reported = obj->comp_events_reported; 1345 resp.async_events_reported = obj->async_events_reported; 1346 1347 uverbs_uobject_put(uobj); 1348 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 1349 return -EFAULT; 1350 1351 return in_len; 1352 } 1353 1354 static int create_qp(struct ib_uverbs_file *file, 1355 struct ib_udata *ucore, 1356 struct ib_udata *uhw, 1357 struct ib_uverbs_ex_create_qp *cmd, 1358 size_t cmd_sz, 1359 int (*cb)(struct ib_uverbs_file *file, 1360 struct ib_uverbs_ex_create_qp_resp *resp, 1361 struct ib_udata *udata), 1362 void *context) 1363 { 1364 struct ib_uqp_object *obj; 1365 struct ib_device *device; 1366 struct ib_pd *pd = NULL; 1367 struct ib_xrcd *xrcd = NULL; 1368 struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT); 1369 struct ib_cq *scq = NULL, *rcq = NULL; 1370 struct ib_srq *srq = NULL; 1371 struct ib_qp *qp; 1372 char *buf; 1373 struct ib_qp_init_attr attr = {}; 1374 struct ib_uverbs_ex_create_qp_resp resp; 1375 int ret; 1376 struct ib_rwq_ind_table *ind_tbl = NULL; 1377 bool has_sq = true; 1378 1379 if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) 1380 return -EPERM; 1381 1382 obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, 1383 file->ucontext); 1384 if (IS_ERR(obj)) 1385 return PTR_ERR(obj); 1386 obj->uxrcd = NULL; 1387 obj->uevent.uobject.user_handle = cmd->user_handle; 1388 mutex_init(&obj->mcast_lock); 1389 1390 if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) + 1391 sizeof(cmd->rwq_ind_tbl_handle) && 1392 (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) { 1393 ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL, 1394 cmd->rwq_ind_tbl_handle, 1395 file->ucontext); 1396 if (!ind_tbl) { 1397 ret = -EINVAL; 1398 goto err_put; 1399 } 1400 1401 attr.rwq_ind_tbl = ind_tbl; 1402 } 1403 1404 if (cmd_sz > sizeof(*cmd) && 1405 !ib_is_udata_cleared(ucore, sizeof(*cmd), 1406 cmd_sz - sizeof(*cmd))) { 1407 ret = -EOPNOTSUPP; 1408 goto err_put; 1409 } 1410 1411 if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) { 1412 ret = -EINVAL; 1413 goto err_put; 1414 } 1415 1416 if (ind_tbl && !cmd->max_send_wr) 1417 has_sq = false; 1418 1419 if (cmd->qp_type == IB_QPT_XRC_TGT) { 1420 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle, 1421 file->ucontext); 1422 1423 if (IS_ERR(xrcd_uobj)) { 1424 ret = -EINVAL; 1425 goto err_put; 1426 } 1427 1428 xrcd = (struct ib_xrcd *)xrcd_uobj->object; 1429 if (!xrcd) { 1430 ret = -EINVAL; 1431 goto err_put; 1432 } 1433 device = xrcd->device; 1434 } else { 1435 if (cmd->qp_type == IB_QPT_XRC_INI) { 1436 cmd->max_recv_wr = 0; 1437 cmd->max_recv_sge = 0; 1438 } else { 1439 if (cmd->is_srq) { 1440 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle, 1441 file->ucontext); 1442 if (!srq || srq->srq_type == IB_SRQT_XRC) { 1443 ret = -EINVAL; 1444 goto err_put; 1445 } 1446 } 1447 1448 if (!ind_tbl) { 1449 if (cmd->recv_cq_handle != cmd->send_cq_handle) { 1450 rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle, 1451 file->ucontext); 1452 if (!rcq) { 1453 ret = -EINVAL; 1454 goto err_put; 1455 } 1456 } 1457 } 1458 } 1459 1460 if (has_sq) 1461 scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle, 1462 file->ucontext); 1463 if (!ind_tbl) 1464 rcq = rcq ?: scq; 1465 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext); 1466 if (!pd || (!scq && has_sq)) { 1467 ret = -EINVAL; 1468 goto err_put; 1469 } 1470 1471 device = pd->device; 1472 } 1473 1474 attr.event_handler = ib_uverbs_qp_event_handler; 1475 attr.qp_context = file; 1476 attr.send_cq = scq; 1477 attr.recv_cq = rcq; 1478 attr.srq = srq; 1479 attr.xrcd = xrcd; 1480 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : 1481 IB_SIGNAL_REQ_WR; 1482 attr.qp_type = cmd->qp_type; 1483 attr.create_flags = 0; 1484 1485 attr.cap.max_send_wr = cmd->max_send_wr; 1486 attr.cap.max_recv_wr = cmd->max_recv_wr; 1487 attr.cap.max_send_sge = cmd->max_send_sge; 1488 attr.cap.max_recv_sge = cmd->max_recv_sge; 1489 attr.cap.max_inline_data = cmd->max_inline_data; 1490 1491 obj->uevent.events_reported = 0; 1492 INIT_LIST_HEAD(&obj->uevent.event_list); 1493 INIT_LIST_HEAD(&obj->mcast_list); 1494 1495 if (cmd_sz >= offsetof(typeof(*cmd), create_flags) + 1496 sizeof(cmd->create_flags)) 1497 attr.create_flags = cmd->create_flags; 1498 1499 if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | 1500 IB_QP_CREATE_CROSS_CHANNEL | 1501 IB_QP_CREATE_MANAGED_SEND | 1502 IB_QP_CREATE_MANAGED_RECV | 1503 IB_QP_CREATE_SCATTER_FCS | 1504 IB_QP_CREATE_CVLAN_STRIPPING | 1505 IB_QP_CREATE_SOURCE_QPN | 1506 IB_QP_CREATE_PCI_WRITE_END_PADDING)) { 1507 ret = -EINVAL; 1508 goto err_put; 1509 } 1510 1511 if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) { 1512 if (!capable(CAP_NET_RAW)) { 1513 ret = -EPERM; 1514 goto err_put; 1515 } 1516 1517 attr.source_qpn = cmd->source_qpn; 1518 } 1519 1520 buf = (void *)cmd + sizeof(*cmd); 1521 if (cmd_sz > sizeof(*cmd)) 1522 if (!(buf[0] == 0 && !memcmp(buf, buf + 1, 1523 cmd_sz - sizeof(*cmd) - 1))) { 1524 ret = -EINVAL; 1525 goto err_put; 1526 } 1527 1528 if (cmd->qp_type == IB_QPT_XRC_TGT) 1529 qp = ib_create_qp(pd, &attr); 1530 else 1531 qp = _ib_create_qp(device, pd, &attr, uhw, 1532 &obj->uevent.uobject); 1533 1534 if (IS_ERR(qp)) { 1535 ret = PTR_ERR(qp); 1536 goto err_put; 1537 } 1538 1539 if (cmd->qp_type != IB_QPT_XRC_TGT) { 1540 ret = ib_create_qp_security(qp, device); 1541 if (ret) 1542 goto err_cb; 1543 1544 qp->real_qp = qp; 1545 qp->pd = pd; 1546 qp->send_cq = attr.send_cq; 1547 qp->recv_cq = attr.recv_cq; 1548 qp->srq = attr.srq; 1549 qp->rwq_ind_tbl = ind_tbl; 1550 qp->event_handler = attr.event_handler; 1551 qp->qp_context = attr.qp_context; 1552 qp->qp_type = attr.qp_type; 1553 atomic_set(&qp->usecnt, 0); 1554 atomic_inc(&pd->usecnt); 1555 qp->port = 0; 1556 if (attr.send_cq) 1557 atomic_inc(&attr.send_cq->usecnt); 1558 if (attr.recv_cq) 1559 atomic_inc(&attr.recv_cq->usecnt); 1560 if (attr.srq) 1561 atomic_inc(&attr.srq->usecnt); 1562 if (ind_tbl) 1563 atomic_inc(&ind_tbl->usecnt); 1564 } else { 1565 /* It is done in _ib_create_qp for other QP types */ 1566 qp->uobject = &obj->uevent.uobject; 1567 } 1568 1569 obj->uevent.uobject.object = qp; 1570 1571 memset(&resp, 0, sizeof resp); 1572 resp.base.qpn = qp->qp_num; 1573 resp.base.qp_handle = obj->uevent.uobject.id; 1574 resp.base.max_recv_sge = attr.cap.max_recv_sge; 1575 resp.base.max_send_sge = attr.cap.max_send_sge; 1576 resp.base.max_recv_wr = attr.cap.max_recv_wr; 1577 resp.base.max_send_wr = attr.cap.max_send_wr; 1578 resp.base.max_inline_data = attr.cap.max_inline_data; 1579 1580 resp.response_length = offsetof(typeof(resp), response_length) + 1581 sizeof(resp.response_length); 1582 1583 ret = cb(file, &resp, ucore); 1584 if (ret) 1585 goto err_cb; 1586 1587 if (xrcd) { 1588 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, 1589 uobject); 1590 atomic_inc(&obj->uxrcd->refcnt); 1591 uobj_put_read(xrcd_uobj); 1592 } 1593 1594 if (pd) 1595 uobj_put_obj_read(pd); 1596 if (scq) 1597 uobj_put_obj_read(scq); 1598 if (rcq && rcq != scq) 1599 uobj_put_obj_read(rcq); 1600 if (srq) 1601 uobj_put_obj_read(srq); 1602 if (ind_tbl) 1603 uobj_put_obj_read(ind_tbl); 1604 1605 uobj_alloc_commit(&obj->uevent.uobject); 1606 1607 return 0; 1608 err_cb: 1609 ib_destroy_qp(qp); 1610 1611 err_put: 1612 if (!IS_ERR(xrcd_uobj)) 1613 uobj_put_read(xrcd_uobj); 1614 if (pd) 1615 uobj_put_obj_read(pd); 1616 if (scq) 1617 uobj_put_obj_read(scq); 1618 if (rcq && rcq != scq) 1619 uobj_put_obj_read(rcq); 1620 if (srq) 1621 uobj_put_obj_read(srq); 1622 if (ind_tbl) 1623 uobj_put_obj_read(ind_tbl); 1624 1625 uobj_alloc_abort(&obj->uevent.uobject); 1626 return ret; 1627 } 1628 1629 static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file, 1630 struct ib_uverbs_ex_create_qp_resp *resp, 1631 struct ib_udata *ucore) 1632 { 1633 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base))) 1634 return -EFAULT; 1635 1636 return 0; 1637 } 1638 1639 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, 1640 struct ib_device *ib_dev, 1641 const char __user *buf, int in_len, 1642 int out_len) 1643 { 1644 struct ib_uverbs_create_qp cmd; 1645 struct ib_uverbs_ex_create_qp cmd_ex; 1646 struct ib_udata ucore; 1647 struct ib_udata uhw; 1648 ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp); 1649 int err; 1650 1651 if (out_len < resp_size) 1652 return -ENOSPC; 1653 1654 if (copy_from_user(&cmd, buf, sizeof(cmd))) 1655 return -EFAULT; 1656 1657 ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response), 1658 sizeof(cmd), resp_size); 1659 ib_uverbs_init_udata(&uhw, buf + sizeof(cmd), 1660 u64_to_user_ptr(cmd.response) + resp_size, 1661 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 1662 out_len - resp_size); 1663 1664 memset(&cmd_ex, 0, sizeof(cmd_ex)); 1665 cmd_ex.user_handle = cmd.user_handle; 1666 cmd_ex.pd_handle = cmd.pd_handle; 1667 cmd_ex.send_cq_handle = cmd.send_cq_handle; 1668 cmd_ex.recv_cq_handle = cmd.recv_cq_handle; 1669 cmd_ex.srq_handle = cmd.srq_handle; 1670 cmd_ex.max_send_wr = cmd.max_send_wr; 1671 cmd_ex.max_recv_wr = cmd.max_recv_wr; 1672 cmd_ex.max_send_sge = cmd.max_send_sge; 1673 cmd_ex.max_recv_sge = cmd.max_recv_sge; 1674 cmd_ex.max_inline_data = cmd.max_inline_data; 1675 cmd_ex.sq_sig_all = cmd.sq_sig_all; 1676 cmd_ex.qp_type = cmd.qp_type; 1677 cmd_ex.is_srq = cmd.is_srq; 1678 1679 err = create_qp(file, &ucore, &uhw, &cmd_ex, 1680 offsetof(typeof(cmd_ex), is_srq) + 1681 sizeof(cmd.is_srq), ib_uverbs_create_qp_cb, 1682 NULL); 1683 1684 if (err) 1685 return err; 1686 1687 return in_len; 1688 } 1689 1690 static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file, 1691 struct ib_uverbs_ex_create_qp_resp *resp, 1692 struct ib_udata *ucore) 1693 { 1694 if (ib_copy_to_udata(ucore, resp, resp->response_length)) 1695 return -EFAULT; 1696 1697 return 0; 1698 } 1699 1700 int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file, 1701 struct ib_device *ib_dev, 1702 struct ib_udata *ucore, 1703 struct ib_udata *uhw) 1704 { 1705 struct ib_uverbs_ex_create_qp_resp resp; 1706 struct ib_uverbs_ex_create_qp cmd = {0}; 1707 int err; 1708 1709 if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) + 1710 sizeof(cmd.comp_mask))) 1711 return -EINVAL; 1712 1713 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 1714 if (err) 1715 return err; 1716 1717 if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK) 1718 return -EINVAL; 1719 1720 if (cmd.reserved) 1721 return -EINVAL; 1722 1723 if (ucore->outlen < (offsetof(typeof(resp), response_length) + 1724 sizeof(resp.response_length))) 1725 return -ENOSPC; 1726 1727 err = create_qp(file, ucore, uhw, &cmd, 1728 min(ucore->inlen, sizeof(cmd)), 1729 ib_uverbs_ex_create_qp_cb, NULL); 1730 1731 if (err) 1732 return err; 1733 1734 return 0; 1735 } 1736 1737 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, 1738 struct ib_device *ib_dev, 1739 const char __user *buf, int in_len, int out_len) 1740 { 1741 struct ib_uverbs_open_qp cmd; 1742 struct ib_uverbs_create_qp_resp resp; 1743 struct ib_udata udata; 1744 struct ib_uqp_object *obj; 1745 struct ib_xrcd *xrcd; 1746 struct ib_uobject *uninitialized_var(xrcd_uobj); 1747 struct ib_qp *qp; 1748 struct ib_qp_open_attr attr; 1749 int ret; 1750 1751 if (out_len < sizeof resp) 1752 return -ENOSPC; 1753 1754 if (copy_from_user(&cmd, buf, sizeof cmd)) 1755 return -EFAULT; 1756 1757 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 1758 u64_to_user_ptr(cmd.response) + sizeof(resp), 1759 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 1760 out_len - sizeof(resp)); 1761 1762 obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, 1763 file->ucontext); 1764 if (IS_ERR(obj)) 1765 return PTR_ERR(obj); 1766 1767 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, 1768 file->ucontext); 1769 if (IS_ERR(xrcd_uobj)) { 1770 ret = -EINVAL; 1771 goto err_put; 1772 } 1773 1774 xrcd = (struct ib_xrcd *)xrcd_uobj->object; 1775 if (!xrcd) { 1776 ret = -EINVAL; 1777 goto err_xrcd; 1778 } 1779 1780 attr.event_handler = ib_uverbs_qp_event_handler; 1781 attr.qp_context = file; 1782 attr.qp_num = cmd.qpn; 1783 attr.qp_type = cmd.qp_type; 1784 1785 obj->uevent.events_reported = 0; 1786 INIT_LIST_HEAD(&obj->uevent.event_list); 1787 INIT_LIST_HEAD(&obj->mcast_list); 1788 1789 qp = ib_open_qp(xrcd, &attr); 1790 if (IS_ERR(qp)) { 1791 ret = PTR_ERR(qp); 1792 goto err_xrcd; 1793 } 1794 1795 obj->uevent.uobject.object = qp; 1796 obj->uevent.uobject.user_handle = cmd.user_handle; 1797 1798 memset(&resp, 0, sizeof resp); 1799 resp.qpn = qp->qp_num; 1800 resp.qp_handle = obj->uevent.uobject.id; 1801 1802 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 1803 ret = -EFAULT; 1804 goto err_destroy; 1805 } 1806 1807 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 1808 atomic_inc(&obj->uxrcd->refcnt); 1809 qp->uobject = &obj->uevent.uobject; 1810 uobj_put_read(xrcd_uobj); 1811 1812 1813 uobj_alloc_commit(&obj->uevent.uobject); 1814 1815 return in_len; 1816 1817 err_destroy: 1818 ib_destroy_qp(qp); 1819 err_xrcd: 1820 uobj_put_read(xrcd_uobj); 1821 err_put: 1822 uobj_alloc_abort(&obj->uevent.uobject); 1823 return ret; 1824 } 1825 1826 static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr, 1827 struct rdma_ah_attr *rdma_attr) 1828 { 1829 const struct ib_global_route *grh; 1830 1831 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr); 1832 uverb_attr->sl = rdma_ah_get_sl(rdma_attr); 1833 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr); 1834 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr); 1835 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) & 1836 IB_AH_GRH); 1837 if (uverb_attr->is_global) { 1838 grh = rdma_ah_read_grh(rdma_attr); 1839 memcpy(uverb_attr->dgid, grh->dgid.raw, 16); 1840 uverb_attr->flow_label = grh->flow_label; 1841 uverb_attr->sgid_index = grh->sgid_index; 1842 uverb_attr->hop_limit = grh->hop_limit; 1843 uverb_attr->traffic_class = grh->traffic_class; 1844 } 1845 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr); 1846 } 1847 1848 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, 1849 struct ib_device *ib_dev, 1850 const char __user *buf, int in_len, 1851 int out_len) 1852 { 1853 struct ib_uverbs_query_qp cmd; 1854 struct ib_uverbs_query_qp_resp resp; 1855 struct ib_qp *qp; 1856 struct ib_qp_attr *attr; 1857 struct ib_qp_init_attr *init_attr; 1858 int ret; 1859 1860 if (copy_from_user(&cmd, buf, sizeof cmd)) 1861 return -EFAULT; 1862 1863 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1864 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 1865 if (!attr || !init_attr) { 1866 ret = -ENOMEM; 1867 goto out; 1868 } 1869 1870 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 1871 if (!qp) { 1872 ret = -EINVAL; 1873 goto out; 1874 } 1875 1876 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 1877 1878 uobj_put_obj_read(qp); 1879 1880 if (ret) 1881 goto out; 1882 1883 memset(&resp, 0, sizeof resp); 1884 1885 resp.qp_state = attr->qp_state; 1886 resp.cur_qp_state = attr->cur_qp_state; 1887 resp.path_mtu = attr->path_mtu; 1888 resp.path_mig_state = attr->path_mig_state; 1889 resp.qkey = attr->qkey; 1890 resp.rq_psn = attr->rq_psn; 1891 resp.sq_psn = attr->sq_psn; 1892 resp.dest_qp_num = attr->dest_qp_num; 1893 resp.qp_access_flags = attr->qp_access_flags; 1894 resp.pkey_index = attr->pkey_index; 1895 resp.alt_pkey_index = attr->alt_pkey_index; 1896 resp.sq_draining = attr->sq_draining; 1897 resp.max_rd_atomic = attr->max_rd_atomic; 1898 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 1899 resp.min_rnr_timer = attr->min_rnr_timer; 1900 resp.port_num = attr->port_num; 1901 resp.timeout = attr->timeout; 1902 resp.retry_cnt = attr->retry_cnt; 1903 resp.rnr_retry = attr->rnr_retry; 1904 resp.alt_port_num = attr->alt_port_num; 1905 resp.alt_timeout = attr->alt_timeout; 1906 1907 copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr); 1908 copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr); 1909 1910 resp.max_send_wr = init_attr->cap.max_send_wr; 1911 resp.max_recv_wr = init_attr->cap.max_recv_wr; 1912 resp.max_send_sge = init_attr->cap.max_send_sge; 1913 resp.max_recv_sge = init_attr->cap.max_recv_sge; 1914 resp.max_inline_data = init_attr->cap.max_inline_data; 1915 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1916 1917 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 1918 ret = -EFAULT; 1919 1920 out: 1921 kfree(attr); 1922 kfree(init_attr); 1923 1924 return ret ? ret : in_len; 1925 } 1926 1927 /* Remove ignored fields set in the attribute mask */ 1928 static int modify_qp_mask(enum ib_qp_type qp_type, int mask) 1929 { 1930 switch (qp_type) { 1931 case IB_QPT_XRC_INI: 1932 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); 1933 case IB_QPT_XRC_TGT: 1934 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | 1935 IB_QP_RNR_RETRY); 1936 default: 1937 return mask; 1938 } 1939 } 1940 1941 static void copy_ah_attr_from_uverbs(struct ib_device *dev, 1942 struct rdma_ah_attr *rdma_attr, 1943 struct ib_uverbs_qp_dest *uverb_attr) 1944 { 1945 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num); 1946 if (uverb_attr->is_global) { 1947 rdma_ah_set_grh(rdma_attr, NULL, 1948 uverb_attr->flow_label, 1949 uverb_attr->sgid_index, 1950 uverb_attr->hop_limit, 1951 uverb_attr->traffic_class); 1952 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid); 1953 } else { 1954 rdma_ah_set_ah_flags(rdma_attr, 0); 1955 } 1956 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid); 1957 rdma_ah_set_sl(rdma_attr, uverb_attr->sl); 1958 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits); 1959 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate); 1960 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num); 1961 rdma_ah_set_make_grd(rdma_attr, false); 1962 } 1963 1964 static int modify_qp(struct ib_uverbs_file *file, 1965 struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata) 1966 { 1967 struct ib_qp_attr *attr; 1968 struct ib_qp *qp; 1969 int ret; 1970 1971 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1972 if (!attr) 1973 return -ENOMEM; 1974 1975 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext); 1976 if (!qp) { 1977 ret = -EINVAL; 1978 goto out; 1979 } 1980 1981 if ((cmd->base.attr_mask & IB_QP_PORT) && 1982 !rdma_is_port_valid(qp->device, cmd->base.port_num)) { 1983 ret = -EINVAL; 1984 goto release_qp; 1985 } 1986 1987 if ((cmd->base.attr_mask & IB_QP_AV)) { 1988 if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { 1989 ret = -EINVAL; 1990 goto release_qp; 1991 } 1992 1993 if (cmd->base.attr_mask & IB_QP_STATE && 1994 cmd->base.qp_state == IB_QPS_RTR) { 1995 /* We are in INIT->RTR TRANSITION (if we are not, 1996 * this transition will be rejected in subsequent checks). 1997 * In the INIT->RTR transition, we cannot have IB_QP_PORT set, 1998 * but the IB_QP_STATE flag is required. 1999 * 2000 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, 2001 * when IB_QP_AV is set, has required inclusion of a valid 2002 * port number in the primary AV. (AVs are created and handled 2003 * differently for infiniband and ethernet (RoCE) ports). 2004 * 2005 * Check the port number included in the primary AV against 2006 * the port number in the qp struct, which was set (and saved) 2007 * in the RST->INIT transition. 2008 */ 2009 if (cmd->base.dest.port_num != qp->real_qp->port) { 2010 ret = -EINVAL; 2011 goto release_qp; 2012 } 2013 } else { 2014 /* We are in SQD->SQD. (If we are not, this transition will 2015 * be rejected later in the verbs layer checks). 2016 * Check for both IB_QP_PORT and IB_QP_AV, these can be set 2017 * together in the SQD->SQD transition. 2018 * 2019 * If only IP_QP_AV was set, add in IB_QP_PORT as well (the 2020 * verbs layer driver does not track primary port changes 2021 * resulting from path migration. Thus, in SQD, if the primary 2022 * AV is modified, the primary port should also be modified). 2023 * 2024 * Note that in this transition, the IB_QP_STATE flag 2025 * is not allowed. 2026 */ 2027 if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) 2028 == (IB_QP_AV | IB_QP_PORT)) && 2029 cmd->base.port_num != cmd->base.dest.port_num) { 2030 ret = -EINVAL; 2031 goto release_qp; 2032 } 2033 if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) 2034 == IB_QP_AV) { 2035 cmd->base.attr_mask |= IB_QP_PORT; 2036 cmd->base.port_num = cmd->base.dest.port_num; 2037 } 2038 } 2039 } 2040 2041 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && 2042 (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || 2043 !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || 2044 cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { 2045 ret = -EINVAL; 2046 goto release_qp; 2047 } 2048 2049 if ((cmd->base.attr_mask & IB_QP_CUR_STATE && 2050 cmd->base.cur_qp_state > IB_QPS_ERR) || 2051 cmd->base.qp_state > IB_QPS_ERR) { 2052 ret = -EINVAL; 2053 goto release_qp; 2054 } 2055 2056 attr->qp_state = cmd->base.qp_state; 2057 attr->cur_qp_state = cmd->base.cur_qp_state; 2058 attr->path_mtu = cmd->base.path_mtu; 2059 attr->path_mig_state = cmd->base.path_mig_state; 2060 attr->qkey = cmd->base.qkey; 2061 attr->rq_psn = cmd->base.rq_psn; 2062 attr->sq_psn = cmd->base.sq_psn; 2063 attr->dest_qp_num = cmd->base.dest_qp_num; 2064 attr->qp_access_flags = cmd->base.qp_access_flags; 2065 attr->pkey_index = cmd->base.pkey_index; 2066 attr->alt_pkey_index = cmd->base.alt_pkey_index; 2067 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; 2068 attr->max_rd_atomic = cmd->base.max_rd_atomic; 2069 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; 2070 attr->min_rnr_timer = cmd->base.min_rnr_timer; 2071 attr->port_num = cmd->base.port_num; 2072 attr->timeout = cmd->base.timeout; 2073 attr->retry_cnt = cmd->base.retry_cnt; 2074 attr->rnr_retry = cmd->base.rnr_retry; 2075 attr->alt_port_num = cmd->base.alt_port_num; 2076 attr->alt_timeout = cmd->base.alt_timeout; 2077 attr->rate_limit = cmd->rate_limit; 2078 2079 if (cmd->base.attr_mask & IB_QP_AV) 2080 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, 2081 &cmd->base.dest); 2082 2083 if (cmd->base.attr_mask & IB_QP_ALT_PATH) 2084 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr, 2085 &cmd->base.alt_dest); 2086 2087 ret = ib_modify_qp_with_udata(qp, attr, 2088 modify_qp_mask(qp->qp_type, 2089 cmd->base.attr_mask), 2090 udata); 2091 2092 release_qp: 2093 uobj_put_obj_read(qp); 2094 out: 2095 kfree(attr); 2096 2097 return ret; 2098 } 2099 2100 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 2101 struct ib_device *ib_dev, 2102 const char __user *buf, int in_len, 2103 int out_len) 2104 { 2105 struct ib_uverbs_ex_modify_qp cmd = {}; 2106 struct ib_udata udata; 2107 int ret; 2108 2109 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base))) 2110 return -EFAULT; 2111 2112 if (cmd.base.attr_mask & 2113 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1)) 2114 return -EOPNOTSUPP; 2115 2116 ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL, 2117 in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr), 2118 out_len); 2119 2120 ret = modify_qp(file, &cmd, &udata); 2121 if (ret) 2122 return ret; 2123 2124 return in_len; 2125 } 2126 2127 int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, 2128 struct ib_device *ib_dev, 2129 struct ib_udata *ucore, 2130 struct ib_udata *uhw) 2131 { 2132 struct ib_uverbs_ex_modify_qp cmd = {}; 2133 int ret; 2134 2135 /* 2136 * Last bit is reserved for extending the attr_mask by 2137 * using another field. 2138 */ 2139 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31)); 2140 2141 if (ucore->inlen < sizeof(cmd.base)) 2142 return -EINVAL; 2143 2144 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 2145 if (ret) 2146 return ret; 2147 2148 if (cmd.base.attr_mask & 2149 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1)) 2150 return -EOPNOTSUPP; 2151 2152 if (ucore->inlen > sizeof(cmd)) { 2153 if (!ib_is_udata_cleared(ucore, sizeof(cmd), 2154 ucore->inlen - sizeof(cmd))) 2155 return -EOPNOTSUPP; 2156 } 2157 2158 ret = modify_qp(file, &cmd, uhw); 2159 2160 return ret; 2161 } 2162 2163 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 2164 struct ib_device *ib_dev, 2165 const char __user *buf, int in_len, 2166 int out_len) 2167 { 2168 struct ib_uverbs_destroy_qp cmd; 2169 struct ib_uverbs_destroy_qp_resp resp; 2170 struct ib_uobject *uobj; 2171 struct ib_uqp_object *obj; 2172 int ret = -EINVAL; 2173 2174 if (copy_from_user(&cmd, buf, sizeof cmd)) 2175 return -EFAULT; 2176 2177 memset(&resp, 0, sizeof resp); 2178 2179 uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle, 2180 file->ucontext); 2181 if (IS_ERR(uobj)) 2182 return PTR_ERR(uobj); 2183 2184 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 2185 /* 2186 * Make sure we don't free the memory in remove_commit as we still 2187 * needs the uobject memory to create the response. 2188 */ 2189 uverbs_uobject_get(uobj); 2190 2191 ret = uobj_remove_commit(uobj); 2192 if (ret) { 2193 uverbs_uobject_put(uobj); 2194 return ret; 2195 } 2196 2197 resp.events_reported = obj->uevent.events_reported; 2198 uverbs_uobject_put(uobj); 2199 2200 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 2201 return -EFAULT; 2202 2203 return in_len; 2204 } 2205 2206 static void *alloc_wr(size_t wr_size, __u32 num_sge) 2207 { 2208 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) / 2209 sizeof (struct ib_sge)) 2210 return NULL; 2211 2212 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2213 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2214 } 2215 2216 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2217 struct ib_device *ib_dev, 2218 const char __user *buf, int in_len, 2219 int out_len) 2220 { 2221 struct ib_uverbs_post_send cmd; 2222 struct ib_uverbs_post_send_resp resp; 2223 struct ib_uverbs_send_wr *user_wr; 2224 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2225 struct ib_qp *qp; 2226 int i, sg_ind; 2227 int is_ud; 2228 ssize_t ret = -EINVAL; 2229 size_t next_size; 2230 2231 if (copy_from_user(&cmd, buf, sizeof cmd)) 2232 return -EFAULT; 2233 2234 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2235 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 2236 return -EINVAL; 2237 2238 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2239 return -EINVAL; 2240 2241 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2242 if (!user_wr) 2243 return -ENOMEM; 2244 2245 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 2246 if (!qp) 2247 goto out; 2248 2249 is_ud = qp->qp_type == IB_QPT_UD; 2250 sg_ind = 0; 2251 last = NULL; 2252 for (i = 0; i < cmd.wr_count; ++i) { 2253 if (copy_from_user(user_wr, 2254 buf + sizeof cmd + i * cmd.wqe_size, 2255 cmd.wqe_size)) { 2256 ret = -EFAULT; 2257 goto out_put; 2258 } 2259 2260 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2261 ret = -EINVAL; 2262 goto out_put; 2263 } 2264 2265 if (is_ud) { 2266 struct ib_ud_wr *ud; 2267 2268 if (user_wr->opcode != IB_WR_SEND && 2269 user_wr->opcode != IB_WR_SEND_WITH_IMM) { 2270 ret = -EINVAL; 2271 goto out_put; 2272 } 2273 2274 next_size = sizeof(*ud); 2275 ud = alloc_wr(next_size, user_wr->num_sge); 2276 if (!ud) { 2277 ret = -ENOMEM; 2278 goto out_put; 2279 } 2280 2281 ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah, 2282 file->ucontext); 2283 if (!ud->ah) { 2284 kfree(ud); 2285 ret = -EINVAL; 2286 goto out_put; 2287 } 2288 ud->remote_qpn = user_wr->wr.ud.remote_qpn; 2289 ud->remote_qkey = user_wr->wr.ud.remote_qkey; 2290 2291 next = &ud->wr; 2292 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || 2293 user_wr->opcode == IB_WR_RDMA_WRITE || 2294 user_wr->opcode == IB_WR_RDMA_READ) { 2295 struct ib_rdma_wr *rdma; 2296 2297 next_size = sizeof(*rdma); 2298 rdma = alloc_wr(next_size, user_wr->num_sge); 2299 if (!rdma) { 2300 ret = -ENOMEM; 2301 goto out_put; 2302 } 2303 2304 rdma->remote_addr = user_wr->wr.rdma.remote_addr; 2305 rdma->rkey = user_wr->wr.rdma.rkey; 2306 2307 next = &rdma->wr; 2308 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2309 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2310 struct ib_atomic_wr *atomic; 2311 2312 next_size = sizeof(*atomic); 2313 atomic = alloc_wr(next_size, user_wr->num_sge); 2314 if (!atomic) { 2315 ret = -ENOMEM; 2316 goto out_put; 2317 } 2318 2319 atomic->remote_addr = user_wr->wr.atomic.remote_addr; 2320 atomic->compare_add = user_wr->wr.atomic.compare_add; 2321 atomic->swap = user_wr->wr.atomic.swap; 2322 atomic->rkey = user_wr->wr.atomic.rkey; 2323 2324 next = &atomic->wr; 2325 } else if (user_wr->opcode == IB_WR_SEND || 2326 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2327 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2328 next_size = sizeof(*next); 2329 next = alloc_wr(next_size, user_wr->num_sge); 2330 if (!next) { 2331 ret = -ENOMEM; 2332 goto out_put; 2333 } 2334 } else { 2335 ret = -EINVAL; 2336 goto out_put; 2337 } 2338 2339 if (user_wr->opcode == IB_WR_SEND_WITH_IMM || 2340 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 2341 next->ex.imm_data = 2342 (__be32 __force) user_wr->ex.imm_data; 2343 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { 2344 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; 2345 } 2346 2347 if (!last) 2348 wr = next; 2349 else 2350 last->next = next; 2351 last = next; 2352 2353 next->next = NULL; 2354 next->wr_id = user_wr->wr_id; 2355 next->num_sge = user_wr->num_sge; 2356 next->opcode = user_wr->opcode; 2357 next->send_flags = user_wr->send_flags; 2358 2359 if (next->num_sge) { 2360 next->sg_list = (void *) next + 2361 ALIGN(next_size, sizeof(struct ib_sge)); 2362 if (copy_from_user(next->sg_list, 2363 buf + sizeof cmd + 2364 cmd.wr_count * cmd.wqe_size + 2365 sg_ind * sizeof (struct ib_sge), 2366 next->num_sge * sizeof (struct ib_sge))) { 2367 ret = -EFAULT; 2368 goto out_put; 2369 } 2370 sg_ind += next->num_sge; 2371 } else 2372 next->sg_list = NULL; 2373 } 2374 2375 resp.bad_wr = 0; 2376 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2377 if (ret) 2378 for (next = wr; next; next = next->next) { 2379 ++resp.bad_wr; 2380 if (next == bad_wr) 2381 break; 2382 } 2383 2384 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 2385 ret = -EFAULT; 2386 2387 out_put: 2388 uobj_put_obj_read(qp); 2389 2390 while (wr) { 2391 if (is_ud && ud_wr(wr)->ah) 2392 uobj_put_obj_read(ud_wr(wr)->ah); 2393 next = wr->next; 2394 kfree(wr); 2395 wr = next; 2396 } 2397 2398 out: 2399 kfree(user_wr); 2400 2401 return ret ? ret : in_len; 2402 } 2403 2404 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2405 int in_len, 2406 u32 wr_count, 2407 u32 sge_count, 2408 u32 wqe_size) 2409 { 2410 struct ib_uverbs_recv_wr *user_wr; 2411 struct ib_recv_wr *wr = NULL, *last, *next; 2412 int sg_ind; 2413 int i; 2414 int ret; 2415 2416 if (in_len < wqe_size * wr_count + 2417 sge_count * sizeof (struct ib_uverbs_sge)) 2418 return ERR_PTR(-EINVAL); 2419 2420 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2421 return ERR_PTR(-EINVAL); 2422 2423 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2424 if (!user_wr) 2425 return ERR_PTR(-ENOMEM); 2426 2427 sg_ind = 0; 2428 last = NULL; 2429 for (i = 0; i < wr_count; ++i) { 2430 if (copy_from_user(user_wr, buf + i * wqe_size, 2431 wqe_size)) { 2432 ret = -EFAULT; 2433 goto err; 2434 } 2435 2436 if (user_wr->num_sge + sg_ind > sge_count) { 2437 ret = -EINVAL; 2438 goto err; 2439 } 2440 2441 if (user_wr->num_sge >= 2442 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) / 2443 sizeof (struct ib_sge)) { 2444 ret = -EINVAL; 2445 goto err; 2446 } 2447 2448 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2449 user_wr->num_sge * sizeof (struct ib_sge), 2450 GFP_KERNEL); 2451 if (!next) { 2452 ret = -ENOMEM; 2453 goto err; 2454 } 2455 2456 if (!last) 2457 wr = next; 2458 else 2459 last->next = next; 2460 last = next; 2461 2462 next->next = NULL; 2463 next->wr_id = user_wr->wr_id; 2464 next->num_sge = user_wr->num_sge; 2465 2466 if (next->num_sge) { 2467 next->sg_list = (void *) next + 2468 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2469 if (copy_from_user(next->sg_list, 2470 buf + wr_count * wqe_size + 2471 sg_ind * sizeof (struct ib_sge), 2472 next->num_sge * sizeof (struct ib_sge))) { 2473 ret = -EFAULT; 2474 goto err; 2475 } 2476 sg_ind += next->num_sge; 2477 } else 2478 next->sg_list = NULL; 2479 } 2480 2481 kfree(user_wr); 2482 return wr; 2483 2484 err: 2485 kfree(user_wr); 2486 2487 while (wr) { 2488 next = wr->next; 2489 kfree(wr); 2490 wr = next; 2491 } 2492 2493 return ERR_PTR(ret); 2494 } 2495 2496 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2497 struct ib_device *ib_dev, 2498 const char __user *buf, int in_len, 2499 int out_len) 2500 { 2501 struct ib_uverbs_post_recv cmd; 2502 struct ib_uverbs_post_recv_resp resp; 2503 struct ib_recv_wr *wr, *next, *bad_wr; 2504 struct ib_qp *qp; 2505 ssize_t ret = -EINVAL; 2506 2507 if (copy_from_user(&cmd, buf, sizeof cmd)) 2508 return -EFAULT; 2509 2510 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2511 in_len - sizeof cmd, cmd.wr_count, 2512 cmd.sge_count, cmd.wqe_size); 2513 if (IS_ERR(wr)) 2514 return PTR_ERR(wr); 2515 2516 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 2517 if (!qp) 2518 goto out; 2519 2520 resp.bad_wr = 0; 2521 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2522 2523 uobj_put_obj_read(qp); 2524 if (ret) { 2525 for (next = wr; next; next = next->next) { 2526 ++resp.bad_wr; 2527 if (next == bad_wr) 2528 break; 2529 } 2530 } 2531 2532 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 2533 ret = -EFAULT; 2534 2535 out: 2536 while (wr) { 2537 next = wr->next; 2538 kfree(wr); 2539 wr = next; 2540 } 2541 2542 return ret ? ret : in_len; 2543 } 2544 2545 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2546 struct ib_device *ib_dev, 2547 const char __user *buf, int in_len, 2548 int out_len) 2549 { 2550 struct ib_uverbs_post_srq_recv cmd; 2551 struct ib_uverbs_post_srq_recv_resp resp; 2552 struct ib_recv_wr *wr, *next, *bad_wr; 2553 struct ib_srq *srq; 2554 ssize_t ret = -EINVAL; 2555 2556 if (copy_from_user(&cmd, buf, sizeof cmd)) 2557 return -EFAULT; 2558 2559 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2560 in_len - sizeof cmd, cmd.wr_count, 2561 cmd.sge_count, cmd.wqe_size); 2562 if (IS_ERR(wr)) 2563 return PTR_ERR(wr); 2564 2565 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext); 2566 if (!srq) 2567 goto out; 2568 2569 resp.bad_wr = 0; 2570 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2571 2572 uobj_put_obj_read(srq); 2573 2574 if (ret) 2575 for (next = wr; next; next = next->next) { 2576 ++resp.bad_wr; 2577 if (next == bad_wr) 2578 break; 2579 } 2580 2581 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 2582 ret = -EFAULT; 2583 2584 out: 2585 while (wr) { 2586 next = wr->next; 2587 kfree(wr); 2588 wr = next; 2589 } 2590 2591 return ret ? ret : in_len; 2592 } 2593 2594 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 2595 struct ib_device *ib_dev, 2596 const char __user *buf, int in_len, 2597 int out_len) 2598 { 2599 struct ib_uverbs_create_ah cmd; 2600 struct ib_uverbs_create_ah_resp resp; 2601 struct ib_uobject *uobj; 2602 struct ib_pd *pd; 2603 struct ib_ah *ah; 2604 struct rdma_ah_attr attr; 2605 int ret; 2606 struct ib_udata udata; 2607 2608 if (out_len < sizeof resp) 2609 return -ENOSPC; 2610 2611 if (copy_from_user(&cmd, buf, sizeof cmd)) 2612 return -EFAULT; 2613 2614 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) 2615 return -EINVAL; 2616 2617 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 2618 u64_to_user_ptr(cmd.response) + sizeof(resp), 2619 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 2620 out_len - sizeof(resp)); 2621 2622 uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext); 2623 if (IS_ERR(uobj)) 2624 return PTR_ERR(uobj); 2625 2626 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext); 2627 if (!pd) { 2628 ret = -EINVAL; 2629 goto err; 2630 } 2631 2632 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num); 2633 rdma_ah_set_make_grd(&attr, false); 2634 rdma_ah_set_dlid(&attr, cmd.attr.dlid); 2635 rdma_ah_set_sl(&attr, cmd.attr.sl); 2636 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits); 2637 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate); 2638 rdma_ah_set_port_num(&attr, cmd.attr.port_num); 2639 2640 if (cmd.attr.is_global) { 2641 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label, 2642 cmd.attr.grh.sgid_index, 2643 cmd.attr.grh.hop_limit, 2644 cmd.attr.grh.traffic_class); 2645 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid); 2646 } else { 2647 rdma_ah_set_ah_flags(&attr, 0); 2648 } 2649 2650 ah = rdma_create_user_ah(pd, &attr, &udata); 2651 if (IS_ERR(ah)) { 2652 ret = PTR_ERR(ah); 2653 goto err_put; 2654 } 2655 2656 ah->uobject = uobj; 2657 uobj->user_handle = cmd.user_handle; 2658 uobj->object = ah; 2659 2660 resp.ah_handle = uobj->id; 2661 2662 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { 2663 ret = -EFAULT; 2664 goto err_copy; 2665 } 2666 2667 uobj_put_obj_read(pd); 2668 uobj_alloc_commit(uobj); 2669 2670 return in_len; 2671 2672 err_copy: 2673 rdma_destroy_ah(ah); 2674 2675 err_put: 2676 uobj_put_obj_read(pd); 2677 2678 err: 2679 uobj_alloc_abort(uobj); 2680 return ret; 2681 } 2682 2683 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 2684 struct ib_device *ib_dev, 2685 const char __user *buf, int in_len, int out_len) 2686 { 2687 struct ib_uverbs_destroy_ah cmd; 2688 struct ib_uobject *uobj; 2689 int ret; 2690 2691 if (copy_from_user(&cmd, buf, sizeof cmd)) 2692 return -EFAULT; 2693 2694 uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle, 2695 file->ucontext); 2696 if (IS_ERR(uobj)) 2697 return PTR_ERR(uobj); 2698 2699 ret = uobj_remove_commit(uobj); 2700 return ret ?: in_len; 2701 } 2702 2703 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 2704 struct ib_device *ib_dev, 2705 const char __user *buf, int in_len, 2706 int out_len) 2707 { 2708 struct ib_uverbs_attach_mcast cmd; 2709 struct ib_qp *qp; 2710 struct ib_uqp_object *obj; 2711 struct ib_uverbs_mcast_entry *mcast; 2712 int ret; 2713 2714 if (copy_from_user(&cmd, buf, sizeof cmd)) 2715 return -EFAULT; 2716 2717 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 2718 if (!qp) 2719 return -EINVAL; 2720 2721 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2722 2723 mutex_lock(&obj->mcast_lock); 2724 list_for_each_entry(mcast, &obj->mcast_list, list) 2725 if (cmd.mlid == mcast->lid && 2726 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2727 ret = 0; 2728 goto out_put; 2729 } 2730 2731 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 2732 if (!mcast) { 2733 ret = -ENOMEM; 2734 goto out_put; 2735 } 2736 2737 mcast->lid = cmd.mlid; 2738 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 2739 2740 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 2741 if (!ret) 2742 list_add_tail(&mcast->list, &obj->mcast_list); 2743 else 2744 kfree(mcast); 2745 2746 out_put: 2747 mutex_unlock(&obj->mcast_lock); 2748 uobj_put_obj_read(qp); 2749 2750 return ret ? ret : in_len; 2751 } 2752 2753 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 2754 struct ib_device *ib_dev, 2755 const char __user *buf, int in_len, 2756 int out_len) 2757 { 2758 struct ib_uverbs_detach_mcast cmd; 2759 struct ib_uqp_object *obj; 2760 struct ib_qp *qp; 2761 struct ib_uverbs_mcast_entry *mcast; 2762 int ret = -EINVAL; 2763 bool found = false; 2764 2765 if (copy_from_user(&cmd, buf, sizeof cmd)) 2766 return -EFAULT; 2767 2768 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 2769 if (!qp) 2770 return -EINVAL; 2771 2772 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2773 mutex_lock(&obj->mcast_lock); 2774 2775 list_for_each_entry(mcast, &obj->mcast_list, list) 2776 if (cmd.mlid == mcast->lid && 2777 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2778 list_del(&mcast->list); 2779 kfree(mcast); 2780 found = true; 2781 break; 2782 } 2783 2784 if (!found) { 2785 ret = -EINVAL; 2786 goto out_put; 2787 } 2788 2789 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid); 2790 2791 out_put: 2792 mutex_unlock(&obj->mcast_lock); 2793 uobj_put_obj_read(qp); 2794 return ret ? ret : in_len; 2795 } 2796 2797 struct ib_uflow_resources { 2798 size_t max; 2799 size_t num; 2800 size_t collection_num; 2801 size_t counters_num; 2802 struct ib_counters **counters; 2803 struct ib_flow_action **collection; 2804 }; 2805 2806 static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs) 2807 { 2808 struct ib_uflow_resources *resources; 2809 2810 resources = kzalloc(sizeof(*resources), GFP_KERNEL); 2811 2812 if (!resources) 2813 goto err_res; 2814 2815 resources->counters = 2816 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL); 2817 2818 if (!resources->counters) 2819 goto err_cnt; 2820 2821 resources->collection = 2822 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL); 2823 2824 if (!resources->collection) 2825 goto err_collection; 2826 2827 resources->max = num_specs; 2828 2829 return resources; 2830 2831 err_collection: 2832 kfree(resources->counters); 2833 err_cnt: 2834 kfree(resources); 2835 err_res: 2836 return NULL; 2837 } 2838 2839 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res) 2840 { 2841 unsigned int i; 2842 2843 for (i = 0; i < uflow_res->collection_num; i++) 2844 atomic_dec(&uflow_res->collection[i]->usecnt); 2845 2846 for (i = 0; i < uflow_res->counters_num; i++) 2847 atomic_dec(&uflow_res->counters[i]->usecnt); 2848 2849 kfree(uflow_res->collection); 2850 kfree(uflow_res->counters); 2851 kfree(uflow_res); 2852 } 2853 2854 static void flow_resources_add(struct ib_uflow_resources *uflow_res, 2855 enum ib_flow_spec_type type, 2856 void *ibobj) 2857 { 2858 WARN_ON(uflow_res->num >= uflow_res->max); 2859 2860 switch (type) { 2861 case IB_FLOW_SPEC_ACTION_HANDLE: 2862 atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt); 2863 uflow_res->collection[uflow_res->collection_num++] = 2864 (struct ib_flow_action *)ibobj; 2865 break; 2866 case IB_FLOW_SPEC_ACTION_COUNT: 2867 atomic_inc(&((struct ib_counters *)ibobj)->usecnt); 2868 uflow_res->counters[uflow_res->counters_num++] = 2869 (struct ib_counters *)ibobj; 2870 break; 2871 default: 2872 WARN_ON(1); 2873 } 2874 2875 uflow_res->num++; 2876 } 2877 2878 static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext, 2879 struct ib_uverbs_flow_spec *kern_spec, 2880 union ib_flow_spec *ib_spec, 2881 struct ib_uflow_resources *uflow_res) 2882 { 2883 ib_spec->type = kern_spec->type; 2884 switch (ib_spec->type) { 2885 case IB_FLOW_SPEC_ACTION_TAG: 2886 if (kern_spec->flow_tag.size != 2887 sizeof(struct ib_uverbs_flow_spec_action_tag)) 2888 return -EINVAL; 2889 2890 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag); 2891 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id; 2892 break; 2893 case IB_FLOW_SPEC_ACTION_DROP: 2894 if (kern_spec->drop.size != 2895 sizeof(struct ib_uverbs_flow_spec_action_drop)) 2896 return -EINVAL; 2897 2898 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop); 2899 break; 2900 case IB_FLOW_SPEC_ACTION_HANDLE: 2901 if (kern_spec->action.size != 2902 sizeof(struct ib_uverbs_flow_spec_action_handle)) 2903 return -EOPNOTSUPP; 2904 ib_spec->action.act = uobj_get_obj_read(flow_action, 2905 UVERBS_OBJECT_FLOW_ACTION, 2906 kern_spec->action.handle, 2907 ucontext); 2908 if (!ib_spec->action.act) 2909 return -EINVAL; 2910 ib_spec->action.size = 2911 sizeof(struct ib_flow_spec_action_handle); 2912 flow_resources_add(uflow_res, 2913 IB_FLOW_SPEC_ACTION_HANDLE, 2914 ib_spec->action.act); 2915 uobj_put_obj_read(ib_spec->action.act); 2916 break; 2917 case IB_FLOW_SPEC_ACTION_COUNT: 2918 if (kern_spec->flow_count.size != 2919 sizeof(struct ib_uverbs_flow_spec_action_count)) 2920 return -EINVAL; 2921 ib_spec->flow_count.counters = 2922 uobj_get_obj_read(counters, 2923 UVERBS_OBJECT_COUNTERS, 2924 kern_spec->flow_count.handle, 2925 ucontext); 2926 if (!ib_spec->flow_count.counters) 2927 return -EINVAL; 2928 ib_spec->flow_count.size = 2929 sizeof(struct ib_flow_spec_action_count); 2930 flow_resources_add(uflow_res, 2931 IB_FLOW_SPEC_ACTION_COUNT, 2932 ib_spec->flow_count.counters); 2933 uobj_put_obj_read(ib_spec->flow_count.counters); 2934 break; 2935 default: 2936 return -EINVAL; 2937 } 2938 return 0; 2939 } 2940 2941 static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec) 2942 { 2943 /* Returns user space filter size, includes padding */ 2944 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2; 2945 } 2946 2947 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size, 2948 u16 ib_real_filter_sz) 2949 { 2950 /* 2951 * User space filter structures must be 64 bit aligned, otherwise this 2952 * may pass, but we won't handle additional new attributes. 2953 */ 2954 2955 if (kern_filter_size > ib_real_filter_sz) { 2956 if (memchr_inv(kern_spec_filter + 2957 ib_real_filter_sz, 0, 2958 kern_filter_size - ib_real_filter_sz)) 2959 return -EINVAL; 2960 return ib_real_filter_sz; 2961 } 2962 return kern_filter_size; 2963 } 2964 2965 int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, 2966 const void *kern_spec_mask, 2967 const void *kern_spec_val, 2968 size_t kern_filter_sz, 2969 union ib_flow_spec *ib_spec) 2970 { 2971 ssize_t actual_filter_sz; 2972 ssize_t ib_filter_sz; 2973 2974 /* User flow spec size must be aligned to 4 bytes */ 2975 if (kern_filter_sz != ALIGN(kern_filter_sz, 4)) 2976 return -EINVAL; 2977 2978 ib_spec->type = type; 2979 2980 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL)) 2981 return -EINVAL; 2982 2983 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2984 case IB_FLOW_SPEC_ETH: 2985 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); 2986 actual_filter_sz = spec_filter_size(kern_spec_mask, 2987 kern_filter_sz, 2988 ib_filter_sz); 2989 if (actual_filter_sz <= 0) 2990 return -EINVAL; 2991 ib_spec->size = sizeof(struct ib_flow_spec_eth); 2992 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz); 2993 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz); 2994 break; 2995 case IB_FLOW_SPEC_IPV4: 2996 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz); 2997 actual_filter_sz = spec_filter_size(kern_spec_mask, 2998 kern_filter_sz, 2999 ib_filter_sz); 3000 if (actual_filter_sz <= 0) 3001 return -EINVAL; 3002 ib_spec->size = sizeof(struct ib_flow_spec_ipv4); 3003 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz); 3004 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz); 3005 break; 3006 case IB_FLOW_SPEC_IPV6: 3007 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz); 3008 actual_filter_sz = spec_filter_size(kern_spec_mask, 3009 kern_filter_sz, 3010 ib_filter_sz); 3011 if (actual_filter_sz <= 0) 3012 return -EINVAL; 3013 ib_spec->size = sizeof(struct ib_flow_spec_ipv6); 3014 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz); 3015 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz); 3016 3017 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) || 3018 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20)) 3019 return -EINVAL; 3020 break; 3021 case IB_FLOW_SPEC_TCP: 3022 case IB_FLOW_SPEC_UDP: 3023 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz); 3024 actual_filter_sz = spec_filter_size(kern_spec_mask, 3025 kern_filter_sz, 3026 ib_filter_sz); 3027 if (actual_filter_sz <= 0) 3028 return -EINVAL; 3029 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp); 3030 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz); 3031 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); 3032 break; 3033 case IB_FLOW_SPEC_VXLAN_TUNNEL: 3034 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz); 3035 actual_filter_sz = spec_filter_size(kern_spec_mask, 3036 kern_filter_sz, 3037 ib_filter_sz); 3038 if (actual_filter_sz <= 0) 3039 return -EINVAL; 3040 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel); 3041 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz); 3042 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz); 3043 3044 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) || 3045 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24)) 3046 return -EINVAL; 3047 break; 3048 case IB_FLOW_SPEC_ESP: 3049 ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz); 3050 actual_filter_sz = spec_filter_size(kern_spec_mask, 3051 kern_filter_sz, 3052 ib_filter_sz); 3053 if (actual_filter_sz <= 0) 3054 return -EINVAL; 3055 ib_spec->esp.size = sizeof(struct ib_flow_spec_esp); 3056 memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz); 3057 memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz); 3058 break; 3059 case IB_FLOW_SPEC_GRE: 3060 ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz); 3061 actual_filter_sz = spec_filter_size(kern_spec_mask, 3062 kern_filter_sz, 3063 ib_filter_sz); 3064 if (actual_filter_sz <= 0) 3065 return -EINVAL; 3066 ib_spec->gre.size = sizeof(struct ib_flow_spec_gre); 3067 memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz); 3068 memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz); 3069 break; 3070 case IB_FLOW_SPEC_MPLS: 3071 ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz); 3072 actual_filter_sz = spec_filter_size(kern_spec_mask, 3073 kern_filter_sz, 3074 ib_filter_sz); 3075 if (actual_filter_sz <= 0) 3076 return -EINVAL; 3077 ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls); 3078 memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz); 3079 memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz); 3080 break; 3081 default: 3082 return -EINVAL; 3083 } 3084 return 0; 3085 } 3086 3087 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, 3088 union ib_flow_spec *ib_spec) 3089 { 3090 ssize_t kern_filter_sz; 3091 void *kern_spec_mask; 3092 void *kern_spec_val; 3093 3094 if (kern_spec->reserved) 3095 return -EINVAL; 3096 3097 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); 3098 3099 kern_spec_val = (void *)kern_spec + 3100 sizeof(struct ib_uverbs_flow_spec_hdr); 3101 kern_spec_mask = kern_spec_val + kern_filter_sz; 3102 3103 return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type, 3104 kern_spec_mask, 3105 kern_spec_val, 3106 kern_filter_sz, ib_spec); 3107 } 3108 3109 static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext, 3110 struct ib_uverbs_flow_spec *kern_spec, 3111 union ib_flow_spec *ib_spec, 3112 struct ib_uflow_resources *uflow_res) 3113 { 3114 if (kern_spec->reserved) 3115 return -EINVAL; 3116 3117 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG) 3118 return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec, 3119 uflow_res); 3120 else 3121 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec); 3122 } 3123 3124 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, 3125 struct ib_device *ib_dev, 3126 struct ib_udata *ucore, 3127 struct ib_udata *uhw) 3128 { 3129 struct ib_uverbs_ex_create_wq cmd = {}; 3130 struct ib_uverbs_ex_create_wq_resp resp = {}; 3131 struct ib_uwq_object *obj; 3132 int err = 0; 3133 struct ib_cq *cq; 3134 struct ib_pd *pd; 3135 struct ib_wq *wq; 3136 struct ib_wq_init_attr wq_init_attr = {}; 3137 size_t required_cmd_sz; 3138 size_t required_resp_len; 3139 3140 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); 3141 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); 3142 3143 if (ucore->inlen < required_cmd_sz) 3144 return -EINVAL; 3145 3146 if (ucore->outlen < required_resp_len) 3147 return -ENOSPC; 3148 3149 if (ucore->inlen > sizeof(cmd) && 3150 !ib_is_udata_cleared(ucore, sizeof(cmd), 3151 ucore->inlen - sizeof(cmd))) 3152 return -EOPNOTSUPP; 3153 3154 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3155 if (err) 3156 return err; 3157 3158 if (cmd.comp_mask) 3159 return -EOPNOTSUPP; 3160 3161 obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, 3162 file->ucontext); 3163 if (IS_ERR(obj)) 3164 return PTR_ERR(obj); 3165 3166 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext); 3167 if (!pd) { 3168 err = -EINVAL; 3169 goto err_uobj; 3170 } 3171 3172 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext); 3173 if (!cq) { 3174 err = -EINVAL; 3175 goto err_put_pd; 3176 } 3177 3178 wq_init_attr.cq = cq; 3179 wq_init_attr.max_sge = cmd.max_sge; 3180 wq_init_attr.max_wr = cmd.max_wr; 3181 wq_init_attr.wq_context = file; 3182 wq_init_attr.wq_type = cmd.wq_type; 3183 wq_init_attr.event_handler = ib_uverbs_wq_event_handler; 3184 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) + 3185 sizeof(cmd.create_flags))) 3186 wq_init_attr.create_flags = cmd.create_flags; 3187 obj->uevent.events_reported = 0; 3188 INIT_LIST_HEAD(&obj->uevent.event_list); 3189 3190 if (!pd->device->create_wq) { 3191 err = -EOPNOTSUPP; 3192 goto err_put_cq; 3193 } 3194 wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 3195 if (IS_ERR(wq)) { 3196 err = PTR_ERR(wq); 3197 goto err_put_cq; 3198 } 3199 3200 wq->uobject = &obj->uevent.uobject; 3201 obj->uevent.uobject.object = wq; 3202 wq->wq_type = wq_init_attr.wq_type; 3203 wq->cq = cq; 3204 wq->pd = pd; 3205 wq->device = pd->device; 3206 wq->wq_context = wq_init_attr.wq_context; 3207 atomic_set(&wq->usecnt, 0); 3208 atomic_inc(&pd->usecnt); 3209 atomic_inc(&cq->usecnt); 3210 wq->uobject = &obj->uevent.uobject; 3211 obj->uevent.uobject.object = wq; 3212 3213 memset(&resp, 0, sizeof(resp)); 3214 resp.wq_handle = obj->uevent.uobject.id; 3215 resp.max_sge = wq_init_attr.max_sge; 3216 resp.max_wr = wq_init_attr.max_wr; 3217 resp.wqn = wq->wq_num; 3218 resp.response_length = required_resp_len; 3219 err = ib_copy_to_udata(ucore, 3220 &resp, resp.response_length); 3221 if (err) 3222 goto err_copy; 3223 3224 uobj_put_obj_read(pd); 3225 uobj_put_obj_read(cq); 3226 uobj_alloc_commit(&obj->uevent.uobject); 3227 return 0; 3228 3229 err_copy: 3230 ib_destroy_wq(wq); 3231 err_put_cq: 3232 uobj_put_obj_read(cq); 3233 err_put_pd: 3234 uobj_put_obj_read(pd); 3235 err_uobj: 3236 uobj_alloc_abort(&obj->uevent.uobject); 3237 3238 return err; 3239 } 3240 3241 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, 3242 struct ib_device *ib_dev, 3243 struct ib_udata *ucore, 3244 struct ib_udata *uhw) 3245 { 3246 struct ib_uverbs_ex_destroy_wq cmd = {}; 3247 struct ib_uverbs_ex_destroy_wq_resp resp = {}; 3248 struct ib_uobject *uobj; 3249 struct ib_uwq_object *obj; 3250 size_t required_cmd_sz; 3251 size_t required_resp_len; 3252 int ret; 3253 3254 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); 3255 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 3256 3257 if (ucore->inlen < required_cmd_sz) 3258 return -EINVAL; 3259 3260 if (ucore->outlen < required_resp_len) 3261 return -ENOSPC; 3262 3263 if (ucore->inlen > sizeof(cmd) && 3264 !ib_is_udata_cleared(ucore, sizeof(cmd), 3265 ucore->inlen - sizeof(cmd))) 3266 return -EOPNOTSUPP; 3267 3268 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3269 if (ret) 3270 return ret; 3271 3272 if (cmd.comp_mask) 3273 return -EOPNOTSUPP; 3274 3275 resp.response_length = required_resp_len; 3276 uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle, 3277 file->ucontext); 3278 if (IS_ERR(uobj)) 3279 return PTR_ERR(uobj); 3280 3281 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); 3282 /* 3283 * Make sure we don't free the memory in remove_commit as we still 3284 * needs the uobject memory to create the response. 3285 */ 3286 uverbs_uobject_get(uobj); 3287 3288 ret = uobj_remove_commit(uobj); 3289 resp.events_reported = obj->uevent.events_reported; 3290 uverbs_uobject_put(uobj); 3291 if (ret) 3292 return ret; 3293 3294 return ib_copy_to_udata(ucore, &resp, resp.response_length); 3295 } 3296 3297 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, 3298 struct ib_device *ib_dev, 3299 struct ib_udata *ucore, 3300 struct ib_udata *uhw) 3301 { 3302 struct ib_uverbs_ex_modify_wq cmd = {}; 3303 struct ib_wq *wq; 3304 struct ib_wq_attr wq_attr = {}; 3305 size_t required_cmd_sz; 3306 int ret; 3307 3308 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); 3309 if (ucore->inlen < required_cmd_sz) 3310 return -EINVAL; 3311 3312 if (ucore->inlen > sizeof(cmd) && 3313 !ib_is_udata_cleared(ucore, sizeof(cmd), 3314 ucore->inlen - sizeof(cmd))) 3315 return -EOPNOTSUPP; 3316 3317 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3318 if (ret) 3319 return ret; 3320 3321 if (!cmd.attr_mask) 3322 return -EINVAL; 3323 3324 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS)) 3325 return -EINVAL; 3326 3327 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext); 3328 if (!wq) 3329 return -EINVAL; 3330 3331 wq_attr.curr_wq_state = cmd.curr_wq_state; 3332 wq_attr.wq_state = cmd.wq_state; 3333 if (cmd.attr_mask & IB_WQ_FLAGS) { 3334 wq_attr.flags = cmd.flags; 3335 wq_attr.flags_mask = cmd.flags_mask; 3336 } 3337 if (!wq->device->modify_wq) { 3338 ret = -EOPNOTSUPP; 3339 goto out; 3340 } 3341 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3342 out: 3343 uobj_put_obj_read(wq); 3344 return ret; 3345 } 3346 3347 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, 3348 struct ib_device *ib_dev, 3349 struct ib_udata *ucore, 3350 struct ib_udata *uhw) 3351 { 3352 struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; 3353 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; 3354 struct ib_uobject *uobj; 3355 int err = 0; 3356 struct ib_rwq_ind_table_init_attr init_attr = {}; 3357 struct ib_rwq_ind_table *rwq_ind_tbl; 3358 struct ib_wq **wqs = NULL; 3359 u32 *wqs_handles = NULL; 3360 struct ib_wq *wq = NULL; 3361 int i, j, num_read_wqs; 3362 u32 num_wq_handles; 3363 u32 expected_in_size; 3364 size_t required_cmd_sz_header; 3365 size_t required_resp_len; 3366 3367 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); 3368 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); 3369 3370 if (ucore->inlen < required_cmd_sz_header) 3371 return -EINVAL; 3372 3373 if (ucore->outlen < required_resp_len) 3374 return -ENOSPC; 3375 3376 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); 3377 if (err) 3378 return err; 3379 3380 ucore->inbuf += required_cmd_sz_header; 3381 ucore->inlen -= required_cmd_sz_header; 3382 3383 if (cmd.comp_mask) 3384 return -EOPNOTSUPP; 3385 3386 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) 3387 return -EINVAL; 3388 3389 num_wq_handles = 1 << cmd.log_ind_tbl_size; 3390 expected_in_size = num_wq_handles * sizeof(__u32); 3391 if (num_wq_handles == 1) 3392 /* input size for wq handles is u64 aligned */ 3393 expected_in_size += sizeof(__u32); 3394 3395 if (ucore->inlen < expected_in_size) 3396 return -EINVAL; 3397 3398 if (ucore->inlen > expected_in_size && 3399 !ib_is_udata_cleared(ucore, expected_in_size, 3400 ucore->inlen - expected_in_size)) 3401 return -EOPNOTSUPP; 3402 3403 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), 3404 GFP_KERNEL); 3405 if (!wqs_handles) 3406 return -ENOMEM; 3407 3408 err = ib_copy_from_udata(wqs_handles, ucore, 3409 num_wq_handles * sizeof(__u32)); 3410 if (err) 3411 goto err_free; 3412 3413 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); 3414 if (!wqs) { 3415 err = -ENOMEM; 3416 goto err_free; 3417 } 3418 3419 for (num_read_wqs = 0; num_read_wqs < num_wq_handles; 3420 num_read_wqs++) { 3421 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs], 3422 file->ucontext); 3423 if (!wq) { 3424 err = -EINVAL; 3425 goto put_wqs; 3426 } 3427 3428 wqs[num_read_wqs] = wq; 3429 } 3430 3431 uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext); 3432 if (IS_ERR(uobj)) { 3433 err = PTR_ERR(uobj); 3434 goto put_wqs; 3435 } 3436 3437 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3438 init_attr.ind_tbl = wqs; 3439 3440 if (!ib_dev->create_rwq_ind_table) { 3441 err = -EOPNOTSUPP; 3442 goto err_uobj; 3443 } 3444 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3445 3446 if (IS_ERR(rwq_ind_tbl)) { 3447 err = PTR_ERR(rwq_ind_tbl); 3448 goto err_uobj; 3449 } 3450 3451 rwq_ind_tbl->ind_tbl = wqs; 3452 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; 3453 rwq_ind_tbl->uobject = uobj; 3454 uobj->object = rwq_ind_tbl; 3455 rwq_ind_tbl->device = ib_dev; 3456 atomic_set(&rwq_ind_tbl->usecnt, 0); 3457 3458 for (i = 0; i < num_wq_handles; i++) 3459 atomic_inc(&wqs[i]->usecnt); 3460 3461 resp.ind_tbl_handle = uobj->id; 3462 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; 3463 resp.response_length = required_resp_len; 3464 3465 err = ib_copy_to_udata(ucore, 3466 &resp, resp.response_length); 3467 if (err) 3468 goto err_copy; 3469 3470 kfree(wqs_handles); 3471 3472 for (j = 0; j < num_read_wqs; j++) 3473 uobj_put_obj_read(wqs[j]); 3474 3475 uobj_alloc_commit(uobj); 3476 return 0; 3477 3478 err_copy: 3479 ib_destroy_rwq_ind_table(rwq_ind_tbl); 3480 err_uobj: 3481 uobj_alloc_abort(uobj); 3482 put_wqs: 3483 for (j = 0; j < num_read_wqs; j++) 3484 uobj_put_obj_read(wqs[j]); 3485 err_free: 3486 kfree(wqs_handles); 3487 kfree(wqs); 3488 return err; 3489 } 3490 3491 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, 3492 struct ib_device *ib_dev, 3493 struct ib_udata *ucore, 3494 struct ib_udata *uhw) 3495 { 3496 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; 3497 struct ib_uobject *uobj; 3498 int ret; 3499 size_t required_cmd_sz; 3500 3501 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); 3502 3503 if (ucore->inlen < required_cmd_sz) 3504 return -EINVAL; 3505 3506 if (ucore->inlen > sizeof(cmd) && 3507 !ib_is_udata_cleared(ucore, sizeof(cmd), 3508 ucore->inlen - sizeof(cmd))) 3509 return -EOPNOTSUPP; 3510 3511 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3512 if (ret) 3513 return ret; 3514 3515 if (cmd.comp_mask) 3516 return -EOPNOTSUPP; 3517 3518 uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle, 3519 file->ucontext); 3520 if (IS_ERR(uobj)) 3521 return PTR_ERR(uobj); 3522 3523 return uobj_remove_commit(uobj); 3524 } 3525 3526 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, 3527 struct ib_device *ib_dev, 3528 struct ib_udata *ucore, 3529 struct ib_udata *uhw) 3530 { 3531 struct ib_uverbs_create_flow cmd; 3532 struct ib_uverbs_create_flow_resp resp; 3533 struct ib_uobject *uobj; 3534 struct ib_uflow_object *uflow; 3535 struct ib_flow *flow_id; 3536 struct ib_uverbs_flow_attr *kern_flow_attr; 3537 struct ib_flow_attr *flow_attr; 3538 struct ib_qp *qp; 3539 struct ib_uflow_resources *uflow_res; 3540 struct ib_uverbs_flow_spec_hdr *kern_spec; 3541 int err = 0; 3542 void *ib_spec; 3543 int i; 3544 3545 if (ucore->inlen < sizeof(cmd)) 3546 return -EINVAL; 3547 3548 if (ucore->outlen < sizeof(resp)) 3549 return -ENOSPC; 3550 3551 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3552 if (err) 3553 return err; 3554 3555 ucore->inbuf += sizeof(cmd); 3556 ucore->inlen -= sizeof(cmd); 3557 3558 if (cmd.comp_mask) 3559 return -EINVAL; 3560 3561 if (!capable(CAP_NET_RAW)) 3562 return -EPERM; 3563 3564 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED) 3565 return -EINVAL; 3566 3567 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && 3568 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) || 3569 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT))) 3570 return -EINVAL; 3571 3572 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) 3573 return -EINVAL; 3574 3575 if (cmd.flow_attr.size > ucore->inlen || 3576 cmd.flow_attr.size > 3577 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 3578 return -EINVAL; 3579 3580 if (cmd.flow_attr.reserved[0] || 3581 cmd.flow_attr.reserved[1]) 3582 return -EINVAL; 3583 3584 if (cmd.flow_attr.num_of_specs) { 3585 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 3586 GFP_KERNEL); 3587 if (!kern_flow_attr) 3588 return -ENOMEM; 3589 3590 *kern_flow_attr = cmd.flow_attr; 3591 err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore, 3592 cmd.flow_attr.size); 3593 if (err) 3594 goto err_free_attr; 3595 } else { 3596 kern_flow_attr = &cmd.flow_attr; 3597 } 3598 3599 uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext); 3600 if (IS_ERR(uobj)) { 3601 err = PTR_ERR(uobj); 3602 goto err_free_attr; 3603 } 3604 3605 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext); 3606 if (!qp) { 3607 err = -EINVAL; 3608 goto err_uobj; 3609 } 3610 3611 if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { 3612 err = -EINVAL; 3613 goto err_put; 3614 } 3615 3616 flow_attr = kzalloc(struct_size(flow_attr, flows, 3617 cmd.flow_attr.num_of_specs), GFP_KERNEL); 3618 if (!flow_attr) { 3619 err = -ENOMEM; 3620 goto err_put; 3621 } 3622 uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs); 3623 if (!uflow_res) { 3624 err = -ENOMEM; 3625 goto err_free_flow_attr; 3626 } 3627 3628 flow_attr->type = kern_flow_attr->type; 3629 flow_attr->priority = kern_flow_attr->priority; 3630 flow_attr->num_of_specs = kern_flow_attr->num_of_specs; 3631 flow_attr->port = kern_flow_attr->port; 3632 flow_attr->flags = kern_flow_attr->flags; 3633 flow_attr->size = sizeof(*flow_attr); 3634 3635 kern_spec = kern_flow_attr->flow_specs; 3636 ib_spec = flow_attr + 1; 3637 for (i = 0; i < flow_attr->num_of_specs && 3638 cmd.flow_attr.size >= sizeof(*kern_spec) && 3639 cmd.flow_attr.size >= kern_spec->size; 3640 i++) { 3641 err = kern_spec_to_ib_spec( 3642 file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec, 3643 ib_spec, uflow_res); 3644 if (err) 3645 goto err_free; 3646 3647 flow_attr->size += 3648 ((union ib_flow_spec *) ib_spec)->size; 3649 cmd.flow_attr.size -= kern_spec->size; 3650 kern_spec = ((void *)kern_spec) + kern_spec->size; 3651 ib_spec += ((union ib_flow_spec *) ib_spec)->size; 3652 } 3653 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3654 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 3655 i, cmd.flow_attr.size); 3656 err = -EINVAL; 3657 goto err_free; 3658 } 3659 3660 flow_id = qp->device->create_flow(qp, flow_attr, 3661 IB_FLOW_DOMAIN_USER, uhw); 3662 3663 if (IS_ERR(flow_id)) { 3664 err = PTR_ERR(flow_id); 3665 goto err_free; 3666 } 3667 atomic_inc(&qp->usecnt); 3668 flow_id->qp = qp; 3669 flow_id->uobject = uobj; 3670 uobj->object = flow_id; 3671 uflow = container_of(uobj, typeof(*uflow), uobject); 3672 uflow->resources = uflow_res; 3673 3674 memset(&resp, 0, sizeof(resp)); 3675 resp.flow_handle = uobj->id; 3676 3677 err = ib_copy_to_udata(ucore, 3678 &resp, sizeof(resp)); 3679 if (err) 3680 goto err_copy; 3681 3682 uobj_put_obj_read(qp); 3683 uobj_alloc_commit(uobj); 3684 kfree(flow_attr); 3685 if (cmd.flow_attr.num_of_specs) 3686 kfree(kern_flow_attr); 3687 return 0; 3688 err_copy: 3689 ib_destroy_flow(flow_id); 3690 err_free: 3691 ib_uverbs_flow_resources_free(uflow_res); 3692 err_free_flow_attr: 3693 kfree(flow_attr); 3694 err_put: 3695 uobj_put_obj_read(qp); 3696 err_uobj: 3697 uobj_alloc_abort(uobj); 3698 err_free_attr: 3699 if (cmd.flow_attr.num_of_specs) 3700 kfree(kern_flow_attr); 3701 return err; 3702 } 3703 3704 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3705 struct ib_device *ib_dev, 3706 struct ib_udata *ucore, 3707 struct ib_udata *uhw) 3708 { 3709 struct ib_uverbs_destroy_flow cmd; 3710 struct ib_uobject *uobj; 3711 int ret; 3712 3713 if (ucore->inlen < sizeof(cmd)) 3714 return -EINVAL; 3715 3716 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3717 if (ret) 3718 return ret; 3719 3720 if (cmd.comp_mask) 3721 return -EINVAL; 3722 3723 uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle, 3724 file->ucontext); 3725 if (IS_ERR(uobj)) 3726 return PTR_ERR(uobj); 3727 3728 ret = uobj_remove_commit(uobj); 3729 return ret; 3730 } 3731 3732 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3733 struct ib_device *ib_dev, 3734 struct ib_uverbs_create_xsrq *cmd, 3735 struct ib_udata *udata) 3736 { 3737 struct ib_uverbs_create_srq_resp resp; 3738 struct ib_usrq_object *obj; 3739 struct ib_pd *pd; 3740 struct ib_srq *srq; 3741 struct ib_uobject *uninitialized_var(xrcd_uobj); 3742 struct ib_srq_init_attr attr; 3743 int ret; 3744 3745 obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, 3746 file->ucontext); 3747 if (IS_ERR(obj)) 3748 return PTR_ERR(obj); 3749 3750 if (cmd->srq_type == IB_SRQT_TM) 3751 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags; 3752 3753 if (cmd->srq_type == IB_SRQT_XRC) { 3754 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle, 3755 file->ucontext); 3756 if (IS_ERR(xrcd_uobj)) { 3757 ret = -EINVAL; 3758 goto err; 3759 } 3760 3761 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object; 3762 if (!attr.ext.xrc.xrcd) { 3763 ret = -EINVAL; 3764 goto err_put_xrcd; 3765 } 3766 3767 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 3768 atomic_inc(&obj->uxrcd->refcnt); 3769 } 3770 3771 if (ib_srq_has_cq(cmd->srq_type)) { 3772 attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle, 3773 file->ucontext); 3774 if (!attr.ext.cq) { 3775 ret = -EINVAL; 3776 goto err_put_xrcd; 3777 } 3778 } 3779 3780 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext); 3781 if (!pd) { 3782 ret = -EINVAL; 3783 goto err_put_cq; 3784 } 3785 3786 attr.event_handler = ib_uverbs_srq_event_handler; 3787 attr.srq_context = file; 3788 attr.srq_type = cmd->srq_type; 3789 attr.attr.max_wr = cmd->max_wr; 3790 attr.attr.max_sge = cmd->max_sge; 3791 attr.attr.srq_limit = cmd->srq_limit; 3792 3793 obj->uevent.events_reported = 0; 3794 INIT_LIST_HEAD(&obj->uevent.event_list); 3795 3796 srq = pd->device->create_srq(pd, &attr, udata); 3797 if (IS_ERR(srq)) { 3798 ret = PTR_ERR(srq); 3799 goto err_put; 3800 } 3801 3802 srq->device = pd->device; 3803 srq->pd = pd; 3804 srq->srq_type = cmd->srq_type; 3805 srq->uobject = &obj->uevent.uobject; 3806 srq->event_handler = attr.event_handler; 3807 srq->srq_context = attr.srq_context; 3808 3809 if (ib_srq_has_cq(cmd->srq_type)) { 3810 srq->ext.cq = attr.ext.cq; 3811 atomic_inc(&attr.ext.cq->usecnt); 3812 } 3813 3814 if (cmd->srq_type == IB_SRQT_XRC) { 3815 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; 3816 atomic_inc(&attr.ext.xrc.xrcd->usecnt); 3817 } 3818 3819 atomic_inc(&pd->usecnt); 3820 atomic_set(&srq->usecnt, 0); 3821 3822 obj->uevent.uobject.object = srq; 3823 obj->uevent.uobject.user_handle = cmd->user_handle; 3824 3825 memset(&resp, 0, sizeof resp); 3826 resp.srq_handle = obj->uevent.uobject.id; 3827 resp.max_wr = attr.attr.max_wr; 3828 resp.max_sge = attr.attr.max_sge; 3829 if (cmd->srq_type == IB_SRQT_XRC) 3830 resp.srqn = srq->ext.xrc.srq_num; 3831 3832 if (copy_to_user(u64_to_user_ptr(cmd->response), 3833 &resp, sizeof resp)) { 3834 ret = -EFAULT; 3835 goto err_copy; 3836 } 3837 3838 if (cmd->srq_type == IB_SRQT_XRC) 3839 uobj_put_read(xrcd_uobj); 3840 3841 if (ib_srq_has_cq(cmd->srq_type)) 3842 uobj_put_obj_read(attr.ext.cq); 3843 3844 uobj_put_obj_read(pd); 3845 uobj_alloc_commit(&obj->uevent.uobject); 3846 3847 return 0; 3848 3849 err_copy: 3850 ib_destroy_srq(srq); 3851 3852 err_put: 3853 uobj_put_obj_read(pd); 3854 3855 err_put_cq: 3856 if (ib_srq_has_cq(cmd->srq_type)) 3857 uobj_put_obj_read(attr.ext.cq); 3858 3859 err_put_xrcd: 3860 if (cmd->srq_type == IB_SRQT_XRC) { 3861 atomic_dec(&obj->uxrcd->refcnt); 3862 uobj_put_read(xrcd_uobj); 3863 } 3864 3865 err: 3866 uobj_alloc_abort(&obj->uevent.uobject); 3867 return ret; 3868 } 3869 3870 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 3871 struct ib_device *ib_dev, 3872 const char __user *buf, int in_len, 3873 int out_len) 3874 { 3875 struct ib_uverbs_create_srq cmd; 3876 struct ib_uverbs_create_xsrq xcmd; 3877 struct ib_uverbs_create_srq_resp resp; 3878 struct ib_udata udata; 3879 int ret; 3880 3881 if (out_len < sizeof resp) 3882 return -ENOSPC; 3883 3884 if (copy_from_user(&cmd, buf, sizeof cmd)) 3885 return -EFAULT; 3886 3887 memset(&xcmd, 0, sizeof(xcmd)); 3888 xcmd.response = cmd.response; 3889 xcmd.user_handle = cmd.user_handle; 3890 xcmd.srq_type = IB_SRQT_BASIC; 3891 xcmd.pd_handle = cmd.pd_handle; 3892 xcmd.max_wr = cmd.max_wr; 3893 xcmd.max_sge = cmd.max_sge; 3894 xcmd.srq_limit = cmd.srq_limit; 3895 3896 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 3897 u64_to_user_ptr(cmd.response) + sizeof(resp), 3898 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 3899 out_len - sizeof(resp)); 3900 3901 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3902 if (ret) 3903 return ret; 3904 3905 return in_len; 3906 } 3907 3908 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 3909 struct ib_device *ib_dev, 3910 const char __user *buf, int in_len, int out_len) 3911 { 3912 struct ib_uverbs_create_xsrq cmd; 3913 struct ib_uverbs_create_srq_resp resp; 3914 struct ib_udata udata; 3915 int ret; 3916 3917 if (out_len < sizeof resp) 3918 return -ENOSPC; 3919 3920 if (copy_from_user(&cmd, buf, sizeof cmd)) 3921 return -EFAULT; 3922 3923 ib_uverbs_init_udata(&udata, buf + sizeof(cmd), 3924 u64_to_user_ptr(cmd.response) + sizeof(resp), 3925 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), 3926 out_len - sizeof(resp)); 3927 3928 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3929 if (ret) 3930 return ret; 3931 3932 return in_len; 3933 } 3934 3935 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 3936 struct ib_device *ib_dev, 3937 const char __user *buf, int in_len, 3938 int out_len) 3939 { 3940 struct ib_uverbs_modify_srq cmd; 3941 struct ib_udata udata; 3942 struct ib_srq *srq; 3943 struct ib_srq_attr attr; 3944 int ret; 3945 3946 if (copy_from_user(&cmd, buf, sizeof cmd)) 3947 return -EFAULT; 3948 3949 ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 3950 out_len); 3951 3952 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext); 3953 if (!srq) 3954 return -EINVAL; 3955 3956 attr.max_wr = cmd.max_wr; 3957 attr.srq_limit = cmd.srq_limit; 3958 3959 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 3960 3961 uobj_put_obj_read(srq); 3962 3963 return ret ? ret : in_len; 3964 } 3965 3966 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 3967 struct ib_device *ib_dev, 3968 const char __user *buf, 3969 int in_len, int out_len) 3970 { 3971 struct ib_uverbs_query_srq cmd; 3972 struct ib_uverbs_query_srq_resp resp; 3973 struct ib_srq_attr attr; 3974 struct ib_srq *srq; 3975 int ret; 3976 3977 if (out_len < sizeof resp) 3978 return -ENOSPC; 3979 3980 if (copy_from_user(&cmd, buf, sizeof cmd)) 3981 return -EFAULT; 3982 3983 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext); 3984 if (!srq) 3985 return -EINVAL; 3986 3987 ret = ib_query_srq(srq, &attr); 3988 3989 uobj_put_obj_read(srq); 3990 3991 if (ret) 3992 return ret; 3993 3994 memset(&resp, 0, sizeof resp); 3995 3996 resp.max_wr = attr.max_wr; 3997 resp.max_sge = attr.max_sge; 3998 resp.srq_limit = attr.srq_limit; 3999 4000 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) 4001 return -EFAULT; 4002 4003 return in_len; 4004 } 4005 4006 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 4007 struct ib_device *ib_dev, 4008 const char __user *buf, int in_len, 4009 int out_len) 4010 { 4011 struct ib_uverbs_destroy_srq cmd; 4012 struct ib_uverbs_destroy_srq_resp resp; 4013 struct ib_uobject *uobj; 4014 struct ib_uevent_object *obj; 4015 int ret = -EINVAL; 4016 4017 if (copy_from_user(&cmd, buf, sizeof cmd)) 4018 return -EFAULT; 4019 4020 uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle, 4021 file->ucontext); 4022 if (IS_ERR(uobj)) 4023 return PTR_ERR(uobj); 4024 4025 obj = container_of(uobj, struct ib_uevent_object, uobject); 4026 /* 4027 * Make sure we don't free the memory in remove_commit as we still 4028 * needs the uobject memory to create the response. 4029 */ 4030 uverbs_uobject_get(uobj); 4031 4032 memset(&resp, 0, sizeof(resp)); 4033 4034 ret = uobj_remove_commit(uobj); 4035 if (ret) { 4036 uverbs_uobject_put(uobj); 4037 return ret; 4038 } 4039 resp.events_reported = obj->events_reported; 4040 uverbs_uobject_put(uobj); 4041 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) 4042 return -EFAULT; 4043 4044 return in_len; 4045 } 4046 4047 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 4048 struct ib_device *ib_dev, 4049 struct ib_udata *ucore, 4050 struct ib_udata *uhw) 4051 { 4052 struct ib_uverbs_ex_query_device_resp resp = { {0} }; 4053 struct ib_uverbs_ex_query_device cmd; 4054 struct ib_device_attr attr = {0}; 4055 int err; 4056 4057 if (!ib_dev->query_device) 4058 return -EOPNOTSUPP; 4059 4060 if (ucore->inlen < sizeof(cmd)) 4061 return -EINVAL; 4062 4063 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 4064 if (err) 4065 return err; 4066 4067 if (cmd.comp_mask) 4068 return -EINVAL; 4069 4070 if (cmd.reserved) 4071 return -EINVAL; 4072 4073 resp.response_length = offsetof(typeof(resp), odp_caps); 4074 4075 if (ucore->outlen < resp.response_length) 4076 return -ENOSPC; 4077 4078 err = ib_dev->query_device(ib_dev, &attr, uhw); 4079 if (err) 4080 return err; 4081 4082 copy_query_dev_fields(file, ib_dev, &resp.base, &attr); 4083 4084 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) 4085 goto end; 4086 4087 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 4088 resp.odp_caps.general_caps = attr.odp_caps.general_caps; 4089 resp.odp_caps.per_transport_caps.rc_odp_caps = 4090 attr.odp_caps.per_transport_caps.rc_odp_caps; 4091 resp.odp_caps.per_transport_caps.uc_odp_caps = 4092 attr.odp_caps.per_transport_caps.uc_odp_caps; 4093 resp.odp_caps.per_transport_caps.ud_odp_caps = 4094 attr.odp_caps.per_transport_caps.ud_odp_caps; 4095 #endif 4096 resp.response_length += sizeof(resp.odp_caps); 4097 4098 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) 4099 goto end; 4100 4101 resp.timestamp_mask = attr.timestamp_mask; 4102 resp.response_length += sizeof(resp.timestamp_mask); 4103 4104 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) 4105 goto end; 4106 4107 resp.hca_core_clock = attr.hca_core_clock; 4108 resp.response_length += sizeof(resp.hca_core_clock); 4109 4110 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex)) 4111 goto end; 4112 4113 resp.device_cap_flags_ex = attr.device_cap_flags; 4114 resp.response_length += sizeof(resp.device_cap_flags_ex); 4115 4116 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps)) 4117 goto end; 4118 4119 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; 4120 resp.rss_caps.max_rwq_indirection_tables = 4121 attr.rss_caps.max_rwq_indirection_tables; 4122 resp.rss_caps.max_rwq_indirection_table_size = 4123 attr.rss_caps.max_rwq_indirection_table_size; 4124 4125 resp.response_length += sizeof(resp.rss_caps); 4126 4127 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq)) 4128 goto end; 4129 4130 resp.max_wq_type_rq = attr.max_wq_type_rq; 4131 resp.response_length += sizeof(resp.max_wq_type_rq); 4132 4133 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps)) 4134 goto end; 4135 4136 resp.raw_packet_caps = attr.raw_packet_caps; 4137 resp.response_length += sizeof(resp.raw_packet_caps); 4138 4139 if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) 4140 goto end; 4141 4142 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; 4143 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; 4144 resp.tm_caps.max_ops = attr.tm_caps.max_ops; 4145 resp.tm_caps.max_sge = attr.tm_caps.max_sge; 4146 resp.tm_caps.flags = attr.tm_caps.flags; 4147 resp.response_length += sizeof(resp.tm_caps); 4148 4149 if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps)) 4150 goto end; 4151 4152 resp.cq_moderation_caps.max_cq_moderation_count = 4153 attr.cq_caps.max_cq_moderation_count; 4154 resp.cq_moderation_caps.max_cq_moderation_period = 4155 attr.cq_caps.max_cq_moderation_period; 4156 resp.response_length += sizeof(resp.cq_moderation_caps); 4157 4158 if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size)) 4159 goto end; 4160 4161 resp.max_dm_size = attr.max_dm_size; 4162 resp.response_length += sizeof(resp.max_dm_size); 4163 end: 4164 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 4165 return err; 4166 } 4167 4168 int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file, 4169 struct ib_device *ib_dev, 4170 struct ib_udata *ucore, 4171 struct ib_udata *uhw) 4172 { 4173 struct ib_uverbs_ex_modify_cq cmd = {}; 4174 struct ib_cq *cq; 4175 size_t required_cmd_sz; 4176 int ret; 4177 4178 required_cmd_sz = offsetof(typeof(cmd), reserved) + 4179 sizeof(cmd.reserved); 4180 if (ucore->inlen < required_cmd_sz) 4181 return -EINVAL; 4182 4183 /* sanity checks */ 4184 if (ucore->inlen > sizeof(cmd) && 4185 !ib_is_udata_cleared(ucore, sizeof(cmd), 4186 ucore->inlen - sizeof(cmd))) 4187 return -EOPNOTSUPP; 4188 4189 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 4190 if (ret) 4191 return ret; 4192 4193 if (!cmd.attr_mask || cmd.reserved) 4194 return -EINVAL; 4195 4196 if (cmd.attr_mask > IB_CQ_MODERATE) 4197 return -EOPNOTSUPP; 4198 4199 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext); 4200 if (!cq) 4201 return -EINVAL; 4202 4203 ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period); 4204 4205 uobj_put_obj_read(cq); 4206 4207 return ret; 4208 } 4209