/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* marks that the device is in the process of destroying its
	 * internal HW resources; protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};
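
/*
 * Lifetime overview (descriptive note): a ucma_context starts with a
 * reference count of one, held by its creator.  Each command handler
 * takes a reference through ucma_get_ctx() and drops it with
 * ucma_put_ctx(); when the count hits zero, ctx->comp is completed so
 * that the destroy path (or the device-removal close work) can safely
 * call rdma_destroy_id().  The global 'mut' protects the IDRs and the
 * 'closing' flag, while file->mut protects the per-file context and
 * event lists as well as 'destroying'.
 */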

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context owns this cm_id can the cm_id be queued for
	 * closing here.  Otherwise the cm_id is an inflight one sitting on
	 * the context's event list, waiting to be detached and reattached
	 * to its new context as part of ucma_get_event; that case is
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
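
/*
 * Descriptive note: returning a non-zero value from an rdma_cm event
 * handler tells the rdma_cm core to destroy the passed-in id.  The
 * handler below relies on this: if it cannot allocate a ucma_event for
 * a new connect request, it returns non-zero so the core tears down
 * the freshly created id that userspace will never see.
 */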

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(current->nsproxy->net_ns,
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
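
/*
 * Descriptive note on the destroy path above: if a device-removal event
 * already queued ctx->close_work, ctx->closing is set and the flushed
 * workqueue has performed the final ucma_put_ctx()/rdma_destroy_id()
 * for us; otherwise ucma_destroy_id() drops the creator's reference,
 * waits for all inflight handlers via ctx->comp, and destroys the id
 * itself.
 */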

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.src_addr) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
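
/*
 * Descriptive note for the ucma_copy_*_route() helpers: num_paths == 0
 * means no path records have been resolved yet, so the GIDs are taken
 * straight from the bound device address; num_paths == 1 copies the
 * primary path record, and num_paths == 2 additionally copies the
 * alternate path.
 */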

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
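
/*
 * Descriptive note: ucma_query_path() sizes its reply by the caller's
 * out_len.  The loop packs at most num_paths records, stopping early
 * once the remaining output space can no longer hold a full
 * struct ib_path_rec_data, and only the records actually packed are
 * copied back to userspace.
 */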

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!ctx->cm_id->device) {
		ret = -EINVAL;
		goto out;
	}

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Check the size before taking a context reference, so an oversized
	 * request cannot leak the reference on the early return.
	 */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}
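
/*
 * Descriptive note: RDMA_MC_JOIN_FLAG_FULLMEMBER requests a regular
 * full-member multicast join, while
 * RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER joins send-only, so the port
 * transmits to the group without receiving from it.  Any other flag
 * combination is rejected with -EINVAL below.
 */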

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in address order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
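
/*
 * Descriptive note: ucma_lock_files() above uses the classic
 * deadlock-avoidance pattern of taking two locks of the same class in
 * a fixed (address) order.  The mutex_lock_nested() annotation tells
 * lockdep that the second file->mut is a deliberate nested acquisition
 * rather than a recursive one.
 */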

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}
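
/*
 * Descriptive note on the write() protocol above: each command is a
 * struct rdma_ucm_cmd_hdr followed by hdr.in bytes of payload.  The
 * handler validates that hdr.cmd indexes a populated slot in
 * ucma_cmd_table and that the declared payload actually fits inside
 * the write buffer before dispatching.
 */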

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any inflight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);