1 /* 2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/completion.h> 34 #include <linux/file.h> 35 #include <linux/mutex.h> 36 #include <linux/poll.h> 37 #include <linux/sched.h> 38 #include <linux/idr.h> 39 #include <linux/in.h> 40 #include <linux/in6.h> 41 #include <linux/miscdevice.h> 42 #include <linux/slab.h> 43 #include <linux/sysctl.h> 44 #include <linux/module.h> 45 #include <linux/nsproxy.h> 46 47 #include <rdma/rdma_user_cm.h> 48 #include <rdma/ib_marshall.h> 49 #include <rdma/rdma_cm.h> 50 #include <rdma/rdma_cm_ib.h> 51 #include <rdma/ib_addr.h> 52 #include <rdma/ib.h> 53 54 MODULE_AUTHOR("Sean Hefty"); 55 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); 56 MODULE_LICENSE("Dual BSD/GPL"); 57 58 static unsigned int max_backlog = 1024; 59 60 static struct ctl_table_header *ucma_ctl_table_hdr; 61 static struct ctl_table ucma_ctl_table[] = { 62 { 63 .procname = "max_backlog", 64 .data = &max_backlog, 65 .maxlen = sizeof max_backlog, 66 .mode = 0644, 67 .proc_handler = proc_dointvec, 68 }, 69 { } 70 }; 71 72 struct ucma_file { 73 struct mutex mut; 74 struct file *filp; 75 struct list_head ctx_list; 76 struct list_head event_list; 77 wait_queue_head_t poll_wait; 78 struct workqueue_struct *close_wq; 79 }; 80 81 struct ucma_context { 82 int id; 83 struct completion comp; 84 atomic_t ref; 85 int events_reported; 86 int backlog; 87 88 struct ucma_file *file; 89 struct rdma_cm_id *cm_id; 90 u64 uid; 91 92 struct list_head list; 93 struct list_head mc_list; 94 /* mark that device is in process of destroying the internal HW 95 * resources, protected by the global mut 96 */ 97 int closing; 98 /* sync between removal event and id destroy, protected by file mut */ 99 int destroying; 100 struct work_struct close_work; 101 }; 102 103 struct ucma_multicast { 104 struct ucma_context *ctx; 105 int id; 106 int events_reported; 107 108 u64 uid; 109 u8 join_state; 110 struct list_head list; 111 struct sockaddr_storage addr; 112 }; 113 114 struct ucma_event { 115 struct ucma_context *ctx; 116 struct ucma_multicast *mc; 117 struct list_head list; 118 struct rdma_cm_id *cm_id; 119 struct rdma_ucm_event_resp resp; 120 
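	/*
	 * Used to defer rdma_destroy_id() of the cm_id carried by a
	 * not-yet-claimed CONNECT_REQUEST event when its device is removed;
	 * see ucma_removal_event_handler() and ucma_close_event_id().
	 */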
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static const struct file_operations ucma_fops;

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all in-flight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly destroyed
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id.
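	 * rdma_destroy_id() also waits for any event handler that is still
	 * running for this id (see the matching comment in ucma_close()).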
	 */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only when the context still points at this cm_id does it own the
	 * cm_id and can it be queued for closing here. Otherwise the cm_id
	 * belongs to an in-flight connect request that is sitting on the
	 * context's event list, waiting to be detached and reattached to a
	 * new context by ucma_get_event(); that case is handled separately
	 * below.
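	 * In both cases the actual rdma_destroy_id() runs later from the
	 * file's close_wq, never from this event-handler callback itself.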
318 */ 319 if (ctx->cm_id == cm_id) { 320 mutex_lock(&mut); 321 ctx->closing = 1; 322 mutex_unlock(&mut); 323 queue_work(ctx->file->close_wq, &ctx->close_work); 324 return; 325 } 326 327 list_for_each_entry(con_req_eve, &ctx->file->event_list, list) { 328 if (con_req_eve->cm_id == cm_id && 329 con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { 330 list_del(&con_req_eve->list); 331 INIT_WORK(&con_req_eve->close_work, ucma_close_event_id); 332 queue_work(ctx->file->close_wq, &con_req_eve->close_work); 333 event_found = 1; 334 break; 335 } 336 } 337 if (!event_found) 338 pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n"); 339 } 340 341 static int ucma_event_handler(struct rdma_cm_id *cm_id, 342 struct rdma_cm_event *event) 343 { 344 struct ucma_event *uevent; 345 struct ucma_context *ctx = cm_id->context; 346 int ret = 0; 347 348 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); 349 if (!uevent) 350 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; 351 352 mutex_lock(&ctx->file->mut); 353 uevent->cm_id = cm_id; 354 ucma_set_event_context(ctx, event, uevent); 355 uevent->resp.event = event->event; 356 uevent->resp.status = event->status; 357 if (cm_id->qp_type == IB_QPT_UD) 358 ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud, 359 &event->param.ud); 360 else 361 ucma_copy_conn_event(&uevent->resp.param.conn, 362 &event->param.conn); 363 364 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 365 if (!ctx->backlog) { 366 ret = -ENOMEM; 367 kfree(uevent); 368 goto out; 369 } 370 ctx->backlog--; 371 } else if (!ctx->uid || ctx->cm_id != cm_id) { 372 /* 373 * We ignore events for new connections until userspace has set 374 * their context. This can only happen if an error occurs on a 375 * new connection before the user accepts it. This is okay, 376 * since the accept will just fail later. However, we do need 377 * to release the underlying HW resources in case of a device 378 * removal event. 379 */ 380 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) 381 ucma_removal_event_handler(cm_id); 382 383 kfree(uevent); 384 goto out; 385 } 386 387 list_add_tail(&uevent->list, &ctx->file->event_list); 388 wake_up_interruptible(&ctx->file->poll_wait); 389 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) 390 ucma_removal_event_handler(cm_id); 391 out: 392 mutex_unlock(&ctx->file->mut); 393 return ret; 394 } 395 396 static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, 397 int in_len, int out_len) 398 { 399 struct ucma_context *ctx; 400 struct rdma_ucm_get_event cmd; 401 struct ucma_event *uevent; 402 int ret = 0; 403 404 /* 405 * Old 32 bit user space does not send the 4 byte padding in the 406 * reserved field. We don't care, allow it to keep working. 
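	 * Hence the check below only requires the response buffer to cover
	 * the fields up to, but not including, the trailing 'reserved' field.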
407 */ 408 if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved)) 409 return -ENOSPC; 410 411 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 412 return -EFAULT; 413 414 mutex_lock(&file->mut); 415 while (list_empty(&file->event_list)) { 416 mutex_unlock(&file->mut); 417 418 if (file->filp->f_flags & O_NONBLOCK) 419 return -EAGAIN; 420 421 if (wait_event_interruptible(file->poll_wait, 422 !list_empty(&file->event_list))) 423 return -ERESTARTSYS; 424 425 mutex_lock(&file->mut); 426 } 427 428 uevent = list_entry(file->event_list.next, struct ucma_event, list); 429 430 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { 431 ctx = ucma_alloc_ctx(file); 432 if (!ctx) { 433 ret = -ENOMEM; 434 goto done; 435 } 436 uevent->ctx->backlog++; 437 ctx->cm_id = uevent->cm_id; 438 ctx->cm_id->context = ctx; 439 uevent->resp.id = ctx->id; 440 } 441 442 if (copy_to_user(u64_to_user_ptr(cmd.response), 443 &uevent->resp, 444 min_t(size_t, out_len, sizeof(uevent->resp)))) { 445 ret = -EFAULT; 446 goto done; 447 } 448 449 list_del(&uevent->list); 450 uevent->ctx->events_reported++; 451 if (uevent->mc) 452 uevent->mc->events_reported++; 453 kfree(uevent); 454 done: 455 mutex_unlock(&file->mut); 456 return ret; 457 } 458 459 static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) 460 { 461 switch (cmd->ps) { 462 case RDMA_PS_TCP: 463 *qp_type = IB_QPT_RC; 464 return 0; 465 case RDMA_PS_UDP: 466 case RDMA_PS_IPOIB: 467 *qp_type = IB_QPT_UD; 468 return 0; 469 case RDMA_PS_IB: 470 *qp_type = cmd->qp_type; 471 return 0; 472 default: 473 return -EINVAL; 474 } 475 } 476 477 static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, 478 int in_len, int out_len) 479 { 480 struct rdma_ucm_create_id cmd; 481 struct rdma_ucm_create_id_resp resp; 482 struct ucma_context *ctx; 483 struct rdma_cm_id *cm_id; 484 enum ib_qp_type qp_type; 485 int ret; 486 487 if (out_len < sizeof(resp)) 488 return -ENOSPC; 489 490 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 491 return -EFAULT; 492 493 ret = ucma_get_qp_type(&cmd, &qp_type); 494 if (ret) 495 return ret; 496 497 mutex_lock(&file->mut); 498 ctx = ucma_alloc_ctx(file); 499 mutex_unlock(&file->mut); 500 if (!ctx) 501 return -ENOMEM; 502 503 ctx->uid = cmd.uid; 504 cm_id = __rdma_create_id(current->nsproxy->net_ns, 505 ucma_event_handler, ctx, cmd.ps, qp_type, NULL); 506 if (IS_ERR(cm_id)) { 507 ret = PTR_ERR(cm_id); 508 goto err1; 509 } 510 511 resp.id = ctx->id; 512 if (copy_to_user(u64_to_user_ptr(cmd.response), 513 &resp, sizeof(resp))) { 514 ret = -EFAULT; 515 goto err2; 516 } 517 518 ctx->cm_id = cm_id; 519 return 0; 520 521 err2: 522 rdma_destroy_id(cm_id); 523 err1: 524 mutex_lock(&mut); 525 idr_remove(&ctx_idr, ctx->id); 526 mutex_unlock(&mut); 527 mutex_lock(&file->mut); 528 list_del(&ctx->list); 529 mutex_unlock(&file->mut); 530 kfree(ctx); 531 return ret; 532 } 533 534 static void ucma_cleanup_multicast(struct ucma_context *ctx) 535 { 536 struct ucma_multicast *mc, *tmp; 537 538 mutex_lock(&mut); 539 list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { 540 list_del(&mc->list); 541 idr_remove(&multicast_idr, mc->id); 542 kfree(mc); 543 } 544 mutex_unlock(&mut); 545 } 546 547 static void ucma_cleanup_mc_events(struct ucma_multicast *mc) 548 { 549 struct ucma_event *uevent, *tmp; 550 551 list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { 552 if (uevent->mc != mc) 553 continue; 554 555 list_del(&uevent->list); 556 kfree(uevent); 557 } 558 } 559 560 /* 561 * ucma_free_ctx is called 
after the underlying rdma CM-ID is destroyed. At 562 * this point, no new events will be reported from the hardware. However, we 563 * still need to cleanup the UCMA context for this ID. Specifically, there 564 * might be events that have not yet been consumed by the user space software. 565 * These might include pending connect requests which we have not completed 566 * processing. We cannot call rdma_destroy_id while holding the lock of the 567 * context (file->mut), as it might cause a deadlock. We therefore extract all 568 * relevant events from the context pending events list while holding the 569 * mutex. After that we release them as needed. 570 */ 571 static int ucma_free_ctx(struct ucma_context *ctx) 572 { 573 int events_reported; 574 struct ucma_event *uevent, *tmp; 575 LIST_HEAD(list); 576 577 578 ucma_cleanup_multicast(ctx); 579 580 /* Cleanup events not yet reported to the user. */ 581 mutex_lock(&ctx->file->mut); 582 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { 583 if (uevent->ctx == ctx) 584 list_move_tail(&uevent->list, &list); 585 } 586 list_del(&ctx->list); 587 mutex_unlock(&ctx->file->mut); 588 589 list_for_each_entry_safe(uevent, tmp, &list, list) { 590 list_del(&uevent->list); 591 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) 592 rdma_destroy_id(uevent->cm_id); 593 kfree(uevent); 594 } 595 596 events_reported = ctx->events_reported; 597 kfree(ctx); 598 return events_reported; 599 } 600 601 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, 602 int in_len, int out_len) 603 { 604 struct rdma_ucm_destroy_id cmd; 605 struct rdma_ucm_destroy_id_resp resp; 606 struct ucma_context *ctx; 607 int ret = 0; 608 609 if (out_len < sizeof(resp)) 610 return -ENOSPC; 611 612 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 613 return -EFAULT; 614 615 mutex_lock(&mut); 616 ctx = _ucma_find_context(cmd.id, file); 617 if (!IS_ERR(ctx)) 618 idr_remove(&ctx_idr, ctx->id); 619 mutex_unlock(&mut); 620 621 if (IS_ERR(ctx)) 622 return PTR_ERR(ctx); 623 624 mutex_lock(&ctx->file->mut); 625 ctx->destroying = 1; 626 mutex_unlock(&ctx->file->mut); 627 628 flush_workqueue(ctx->file->close_wq); 629 /* At this point it's guaranteed that there is no inflight 630 * closing task */ 631 mutex_lock(&mut); 632 if (!ctx->closing) { 633 mutex_unlock(&mut); 634 ucma_put_ctx(ctx); 635 wait_for_completion(&ctx->comp); 636 rdma_destroy_id(ctx->cm_id); 637 } else { 638 mutex_unlock(&mut); 639 } 640 641 resp.events_reported = ucma_free_ctx(ctx); 642 if (copy_to_user(u64_to_user_ptr(cmd.response), 643 &resp, sizeof(resp))) 644 ret = -EFAULT; 645 646 return ret; 647 } 648 649 static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, 650 int in_len, int out_len) 651 { 652 struct rdma_ucm_bind_ip cmd; 653 struct ucma_context *ctx; 654 int ret; 655 656 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 657 return -EFAULT; 658 659 if (!rdma_addr_size_in6(&cmd.addr)) 660 return -EINVAL; 661 662 ctx = ucma_get_ctx(file, cmd.id); 663 if (IS_ERR(ctx)) 664 return PTR_ERR(ctx); 665 666 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); 667 ucma_put_ctx(ctx); 668 return ret; 669 } 670 671 static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, 672 int in_len, int out_len) 673 { 674 struct rdma_ucm_bind cmd; 675 struct ucma_context *ctx; 676 int ret; 677 678 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 679 return -EFAULT; 680 681 if (cmd.reserved || !cmd.addr_size || 682 cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) 
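		/* addr_size must match the length implied by the sockaddr family */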
683 return -EINVAL; 684 685 ctx = ucma_get_ctx(file, cmd.id); 686 if (IS_ERR(ctx)) 687 return PTR_ERR(ctx); 688 689 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); 690 ucma_put_ctx(ctx); 691 return ret; 692 } 693 694 static ssize_t ucma_resolve_ip(struct ucma_file *file, 695 const char __user *inbuf, 696 int in_len, int out_len) 697 { 698 struct rdma_ucm_resolve_ip cmd; 699 struct ucma_context *ctx; 700 int ret; 701 702 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 703 return -EFAULT; 704 705 if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || 706 !rdma_addr_size_in6(&cmd.dst_addr)) 707 return -EINVAL; 708 709 ctx = ucma_get_ctx(file, cmd.id); 710 if (IS_ERR(ctx)) 711 return PTR_ERR(ctx); 712 713 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 714 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); 715 ucma_put_ctx(ctx); 716 return ret; 717 } 718 719 static ssize_t ucma_resolve_addr(struct ucma_file *file, 720 const char __user *inbuf, 721 int in_len, int out_len) 722 { 723 struct rdma_ucm_resolve_addr cmd; 724 struct ucma_context *ctx; 725 int ret; 726 727 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 728 return -EFAULT; 729 730 if (cmd.reserved || 731 (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || 732 !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) 733 return -EINVAL; 734 735 ctx = ucma_get_ctx(file, cmd.id); 736 if (IS_ERR(ctx)) 737 return PTR_ERR(ctx); 738 739 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 740 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); 741 ucma_put_ctx(ctx); 742 return ret; 743 } 744 745 static ssize_t ucma_resolve_route(struct ucma_file *file, 746 const char __user *inbuf, 747 int in_len, int out_len) 748 { 749 struct rdma_ucm_resolve_route cmd; 750 struct ucma_context *ctx; 751 int ret; 752 753 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 754 return -EFAULT; 755 756 ctx = ucma_get_ctx_dev(file, cmd.id); 757 if (IS_ERR(ctx)) 758 return PTR_ERR(ctx); 759 760 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); 761 ucma_put_ctx(ctx); 762 return ret; 763 } 764 765 static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, 766 struct rdma_route *route) 767 { 768 struct rdma_dev_addr *dev_addr; 769 770 resp->num_paths = route->num_paths; 771 switch (route->num_paths) { 772 case 0: 773 dev_addr = &route->addr.dev_addr; 774 rdma_addr_get_dgid(dev_addr, 775 (union ib_gid *) &resp->ib_route[0].dgid); 776 rdma_addr_get_sgid(dev_addr, 777 (union ib_gid *) &resp->ib_route[0].sgid); 778 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 779 break; 780 case 2: 781 ib_copy_path_rec_to_user(&resp->ib_route[1], 782 &route->path_rec[1]); 783 /* fall through */ 784 case 1: 785 ib_copy_path_rec_to_user(&resp->ib_route[0], 786 &route->path_rec[0]); 787 break; 788 default: 789 break; 790 } 791 } 792 793 static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, 794 struct rdma_route *route) 795 { 796 797 resp->num_paths = route->num_paths; 798 switch (route->num_paths) { 799 case 0: 800 rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, 801 (union ib_gid *)&resp->ib_route[0].dgid); 802 rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, 803 (union ib_gid *)&resp->ib_route[0].sgid); 804 resp->ib_route[0].pkey = cpu_to_be16(0xffff); 805 break; 806 case 2: 807 ib_copy_path_rec_to_user(&resp->ib_route[1], 808 &route->path_rec[1]); 809 /* fall through */ 810 case 1: 811 
ib_copy_path_rec_to_user(&resp->ib_route[0], 812 &route->path_rec[0]); 813 break; 814 default: 815 break; 816 } 817 } 818 819 static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, 820 struct rdma_route *route) 821 { 822 struct rdma_dev_addr *dev_addr; 823 824 dev_addr = &route->addr.dev_addr; 825 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); 826 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); 827 } 828 829 static ssize_t ucma_query_route(struct ucma_file *file, 830 const char __user *inbuf, 831 int in_len, int out_len) 832 { 833 struct rdma_ucm_query cmd; 834 struct rdma_ucm_query_route_resp resp; 835 struct ucma_context *ctx; 836 struct sockaddr *addr; 837 int ret = 0; 838 839 if (out_len < sizeof(resp)) 840 return -ENOSPC; 841 842 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 843 return -EFAULT; 844 845 ctx = ucma_get_ctx(file, cmd.id); 846 if (IS_ERR(ctx)) 847 return PTR_ERR(ctx); 848 849 memset(&resp, 0, sizeof resp); 850 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; 851 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? 852 sizeof(struct sockaddr_in) : 853 sizeof(struct sockaddr_in6)); 854 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; 855 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 856 sizeof(struct sockaddr_in) : 857 sizeof(struct sockaddr_in6)); 858 if (!ctx->cm_id->device) 859 goto out; 860 861 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; 862 resp.port_num = ctx->cm_id->port_num; 863 864 if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) 865 ucma_copy_ib_route(&resp, &ctx->cm_id->route); 866 else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) 867 ucma_copy_iboe_route(&resp, &ctx->cm_id->route); 868 else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) 869 ucma_copy_iw_route(&resp, &ctx->cm_id->route); 870 871 out: 872 if (copy_to_user(u64_to_user_ptr(cmd.response), 873 &resp, sizeof(resp))) 874 ret = -EFAULT; 875 876 ucma_put_ctx(ctx); 877 return ret; 878 } 879 880 static void ucma_query_device_addr(struct rdma_cm_id *cm_id, 881 struct rdma_ucm_query_addr_resp *resp) 882 { 883 if (!cm_id->device) 884 return; 885 886 resp->node_guid = (__force __u64) cm_id->device->node_guid; 887 resp->port_num = cm_id->port_num; 888 resp->pkey = (__force __u16) cpu_to_be16( 889 ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); 890 } 891 892 static ssize_t ucma_query_addr(struct ucma_context *ctx, 893 void __user *response, int out_len) 894 { 895 struct rdma_ucm_query_addr_resp resp; 896 struct sockaddr *addr; 897 int ret = 0; 898 899 if (out_len < sizeof(resp)) 900 return -ENOSPC; 901 902 memset(&resp, 0, sizeof resp); 903 904 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; 905 resp.src_size = rdma_addr_size(addr); 906 memcpy(&resp.src_addr, addr, resp.src_size); 907 908 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; 909 resp.dst_size = rdma_addr_size(addr); 910 memcpy(&resp.dst_addr, addr, resp.dst_size); 911 912 ucma_query_device_addr(ctx->cm_id, &resp); 913 914 if (copy_to_user(response, &resp, sizeof(resp))) 915 ret = -EFAULT; 916 917 return ret; 918 } 919 920 static ssize_t ucma_query_path(struct ucma_context *ctx, 921 void __user *response, int out_len) 922 { 923 struct rdma_ucm_query_path_resp *resp; 924 int i, ret = 0; 925 926 if (out_len < sizeof(*resp)) 927 return -ENOSPC; 928 929 resp = kzalloc(out_len, GFP_KERNEL); 930 if (!resp) 931 return -ENOMEM; 932 933 resp->num_paths = 
ctx->cm_id->route.num_paths; 934 for (i = 0, out_len -= sizeof(*resp); 935 i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); 936 i++, out_len -= sizeof(struct ib_path_rec_data)) { 937 struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; 938 939 resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | 940 IB_PATH_BIDIRECTIONAL; 941 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { 942 struct sa_path_rec ib; 943 944 sa_convert_path_opa_to_ib(&ib, rec); 945 ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); 946 947 } else { 948 ib_sa_pack_path(rec, &resp->path_data[i].path_rec); 949 } 950 } 951 952 if (copy_to_user(response, resp, 953 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) 954 ret = -EFAULT; 955 956 kfree(resp); 957 return ret; 958 } 959 960 static ssize_t ucma_query_gid(struct ucma_context *ctx, 961 void __user *response, int out_len) 962 { 963 struct rdma_ucm_query_addr_resp resp; 964 struct sockaddr_ib *addr; 965 int ret = 0; 966 967 if (out_len < sizeof(resp)) 968 return -ENOSPC; 969 970 memset(&resp, 0, sizeof resp); 971 972 ucma_query_device_addr(ctx->cm_id, &resp); 973 974 addr = (struct sockaddr_ib *) &resp.src_addr; 975 resp.src_size = sizeof(*addr); 976 if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { 977 memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); 978 } else { 979 addr->sib_family = AF_IB; 980 addr->sib_pkey = (__force __be16) resp.pkey; 981 rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr, 982 NULL); 983 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) 984 &ctx->cm_id->route.addr.src_addr); 985 } 986 987 addr = (struct sockaddr_ib *) &resp.dst_addr; 988 resp.dst_size = sizeof(*addr); 989 if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { 990 memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); 991 } else { 992 addr->sib_family = AF_IB; 993 addr->sib_pkey = (__force __be16) resp.pkey; 994 rdma_read_gids(ctx->cm_id, NULL, 995 (union ib_gid *)&addr->sib_addr); 996 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) 997 &ctx->cm_id->route.addr.dst_addr); 998 } 999 1000 if (copy_to_user(response, &resp, sizeof(resp))) 1001 ret = -EFAULT; 1002 1003 return ret; 1004 } 1005 1006 static ssize_t ucma_query(struct ucma_file *file, 1007 const char __user *inbuf, 1008 int in_len, int out_len) 1009 { 1010 struct rdma_ucm_query cmd; 1011 struct ucma_context *ctx; 1012 void __user *response; 1013 int ret; 1014 1015 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1016 return -EFAULT; 1017 1018 response = u64_to_user_ptr(cmd.response); 1019 ctx = ucma_get_ctx(file, cmd.id); 1020 if (IS_ERR(ctx)) 1021 return PTR_ERR(ctx); 1022 1023 switch (cmd.option) { 1024 case RDMA_USER_CM_QUERY_ADDR: 1025 ret = ucma_query_addr(ctx, response, out_len); 1026 break; 1027 case RDMA_USER_CM_QUERY_PATH: 1028 ret = ucma_query_path(ctx, response, out_len); 1029 break; 1030 case RDMA_USER_CM_QUERY_GID: 1031 ret = ucma_query_gid(ctx, response, out_len); 1032 break; 1033 default: 1034 ret = -ENOSYS; 1035 break; 1036 } 1037 1038 ucma_put_ctx(ctx); 1039 return ret; 1040 } 1041 1042 static void ucma_copy_conn_param(struct rdma_cm_id *id, 1043 struct rdma_conn_param *dst, 1044 struct rdma_ucm_conn_param *src) 1045 { 1046 dst->private_data = src->private_data; 1047 dst->private_data_len = src->private_data_len; 1048 dst->responder_resources =src->responder_resources; 1049 dst->initiator_depth = src->initiator_depth; 1050 dst->flow_control = src->flow_control; 1051 dst->retry_count = src->retry_count; 1052 
dst->rnr_retry_count = src->rnr_retry_count; 1053 dst->srq = src->srq; 1054 dst->qp_num = src->qp_num; 1055 dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0; 1056 } 1057 1058 static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, 1059 int in_len, int out_len) 1060 { 1061 struct rdma_ucm_connect cmd; 1062 struct rdma_conn_param conn_param; 1063 struct ucma_context *ctx; 1064 int ret; 1065 1066 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1067 return -EFAULT; 1068 1069 if (!cmd.conn_param.valid) 1070 return -EINVAL; 1071 1072 ctx = ucma_get_ctx_dev(file, cmd.id); 1073 if (IS_ERR(ctx)) 1074 return PTR_ERR(ctx); 1075 1076 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); 1077 ret = rdma_connect(ctx->cm_id, &conn_param); 1078 ucma_put_ctx(ctx); 1079 return ret; 1080 } 1081 1082 static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, 1083 int in_len, int out_len) 1084 { 1085 struct rdma_ucm_listen cmd; 1086 struct ucma_context *ctx; 1087 int ret; 1088 1089 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1090 return -EFAULT; 1091 1092 ctx = ucma_get_ctx(file, cmd.id); 1093 if (IS_ERR(ctx)) 1094 return PTR_ERR(ctx); 1095 1096 ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? 1097 cmd.backlog : max_backlog; 1098 ret = rdma_listen(ctx->cm_id, ctx->backlog); 1099 ucma_put_ctx(ctx); 1100 return ret; 1101 } 1102 1103 static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, 1104 int in_len, int out_len) 1105 { 1106 struct rdma_ucm_accept cmd; 1107 struct rdma_conn_param conn_param; 1108 struct ucma_context *ctx; 1109 int ret; 1110 1111 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1112 return -EFAULT; 1113 1114 ctx = ucma_get_ctx_dev(file, cmd.id); 1115 if (IS_ERR(ctx)) 1116 return PTR_ERR(ctx); 1117 1118 if (cmd.conn_param.valid) { 1119 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); 1120 mutex_lock(&file->mut); 1121 ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); 1122 if (!ret) 1123 ctx->uid = cmd.uid; 1124 mutex_unlock(&file->mut); 1125 } else 1126 ret = __rdma_accept(ctx->cm_id, NULL, NULL); 1127 1128 ucma_put_ctx(ctx); 1129 return ret; 1130 } 1131 1132 static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, 1133 int in_len, int out_len) 1134 { 1135 struct rdma_ucm_reject cmd; 1136 struct ucma_context *ctx; 1137 int ret; 1138 1139 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1140 return -EFAULT; 1141 1142 ctx = ucma_get_ctx_dev(file, cmd.id); 1143 if (IS_ERR(ctx)) 1144 return PTR_ERR(ctx); 1145 1146 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); 1147 ucma_put_ctx(ctx); 1148 return ret; 1149 } 1150 1151 static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, 1152 int in_len, int out_len) 1153 { 1154 struct rdma_ucm_disconnect cmd; 1155 struct ucma_context *ctx; 1156 int ret; 1157 1158 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1159 return -EFAULT; 1160 1161 ctx = ucma_get_ctx_dev(file, cmd.id); 1162 if (IS_ERR(ctx)) 1163 return PTR_ERR(ctx); 1164 1165 ret = rdma_disconnect(ctx->cm_id); 1166 ucma_put_ctx(ctx); 1167 return ret; 1168 } 1169 1170 static ssize_t ucma_init_qp_attr(struct ucma_file *file, 1171 const char __user *inbuf, 1172 int in_len, int out_len) 1173 { 1174 struct rdma_ucm_init_qp_attr cmd; 1175 struct ib_uverbs_qp_attr resp; 1176 struct ucma_context *ctx; 1177 struct ib_qp_attr qp_attr; 1178 int ret; 1179 1180 if (out_len < sizeof(resp)) 1181 return -ENOSPC; 1182 1183 if 
(copy_from_user(&cmd, inbuf, sizeof(cmd))) 1184 return -EFAULT; 1185 1186 if (cmd.qp_state > IB_QPS_ERR) 1187 return -EINVAL; 1188 1189 ctx = ucma_get_ctx_dev(file, cmd.id); 1190 if (IS_ERR(ctx)) 1191 return PTR_ERR(ctx); 1192 1193 resp.qp_attr_mask = 0; 1194 memset(&qp_attr, 0, sizeof qp_attr); 1195 qp_attr.qp_state = cmd.qp_state; 1196 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); 1197 if (ret) 1198 goto out; 1199 1200 ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr); 1201 if (copy_to_user(u64_to_user_ptr(cmd.response), 1202 &resp, sizeof(resp))) 1203 ret = -EFAULT; 1204 1205 out: 1206 ucma_put_ctx(ctx); 1207 return ret; 1208 } 1209 1210 static int ucma_set_option_id(struct ucma_context *ctx, int optname, 1211 void *optval, size_t optlen) 1212 { 1213 int ret = 0; 1214 1215 switch (optname) { 1216 case RDMA_OPTION_ID_TOS: 1217 if (optlen != sizeof(u8)) { 1218 ret = -EINVAL; 1219 break; 1220 } 1221 rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); 1222 break; 1223 case RDMA_OPTION_ID_REUSEADDR: 1224 if (optlen != sizeof(int)) { 1225 ret = -EINVAL; 1226 break; 1227 } 1228 ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); 1229 break; 1230 case RDMA_OPTION_ID_AFONLY: 1231 if (optlen != sizeof(int)) { 1232 ret = -EINVAL; 1233 break; 1234 } 1235 ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); 1236 break; 1237 default: 1238 ret = -ENOSYS; 1239 } 1240 1241 return ret; 1242 } 1243 1244 static int ucma_set_ib_path(struct ucma_context *ctx, 1245 struct ib_path_rec_data *path_data, size_t optlen) 1246 { 1247 struct sa_path_rec sa_path; 1248 struct rdma_cm_event event; 1249 int ret; 1250 1251 if (optlen % sizeof(*path_data)) 1252 return -EINVAL; 1253 1254 for (; optlen; optlen -= sizeof(*path_data), path_data++) { 1255 if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | 1256 IB_PATH_BIDIRECTIONAL)) 1257 break; 1258 } 1259 1260 if (!optlen) 1261 return -EINVAL; 1262 1263 if (!ctx->cm_id->device) 1264 return -EINVAL; 1265 1266 memset(&sa_path, 0, sizeof(sa_path)); 1267 1268 sa_path.rec_type = SA_PATH_REC_TYPE_IB; 1269 ib_sa_unpack_path(path_data->path_rec, &sa_path); 1270 1271 if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) { 1272 struct sa_path_rec opa; 1273 1274 sa_convert_path_ib_to_opa(&opa, &sa_path); 1275 ret = rdma_set_ib_path(ctx->cm_id, &opa); 1276 } else { 1277 ret = rdma_set_ib_path(ctx->cm_id, &sa_path); 1278 } 1279 if (ret) 1280 return ret; 1281 1282 memset(&event, 0, sizeof event); 1283 event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1284 return ucma_event_handler(ctx->cm_id, &event); 1285 } 1286 1287 static int ucma_set_option_ib(struct ucma_context *ctx, int optname, 1288 void *optval, size_t optlen) 1289 { 1290 int ret; 1291 1292 switch (optname) { 1293 case RDMA_OPTION_IB_PATH: 1294 ret = ucma_set_ib_path(ctx, optval, optlen); 1295 break; 1296 default: 1297 ret = -ENOSYS; 1298 } 1299 1300 return ret; 1301 } 1302 1303 static int ucma_set_option_level(struct ucma_context *ctx, int level, 1304 int optname, void *optval, size_t optlen) 1305 { 1306 int ret; 1307 1308 switch (level) { 1309 case RDMA_OPTION_ID: 1310 ret = ucma_set_option_id(ctx, optname, optval, optlen); 1311 break; 1312 case RDMA_OPTION_IB: 1313 ret = ucma_set_option_ib(ctx, optname, optval, optlen); 1314 break; 1315 default: 1316 ret = -ENOSYS; 1317 } 1318 1319 return ret; 1320 } 1321 1322 static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, 1323 int in_len, int out_len) 1324 { 1325 struct rdma_ucm_set_option cmd; 
1326 struct ucma_context *ctx; 1327 void *optval; 1328 int ret; 1329 1330 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1331 return -EFAULT; 1332 1333 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) 1334 return -EINVAL; 1335 1336 ctx = ucma_get_ctx(file, cmd.id); 1337 if (IS_ERR(ctx)) 1338 return PTR_ERR(ctx); 1339 1340 optval = memdup_user(u64_to_user_ptr(cmd.optval), 1341 cmd.optlen); 1342 if (IS_ERR(optval)) { 1343 ret = PTR_ERR(optval); 1344 goto out; 1345 } 1346 1347 ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, 1348 cmd.optlen); 1349 kfree(optval); 1350 1351 out: 1352 ucma_put_ctx(ctx); 1353 return ret; 1354 } 1355 1356 static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, 1357 int in_len, int out_len) 1358 { 1359 struct rdma_ucm_notify cmd; 1360 struct ucma_context *ctx; 1361 int ret = -EINVAL; 1362 1363 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1364 return -EFAULT; 1365 1366 ctx = ucma_get_ctx(file, cmd.id); 1367 if (IS_ERR(ctx)) 1368 return PTR_ERR(ctx); 1369 1370 if (ctx->cm_id->device) 1371 ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); 1372 1373 ucma_put_ctx(ctx); 1374 return ret; 1375 } 1376 1377 static ssize_t ucma_process_join(struct ucma_file *file, 1378 struct rdma_ucm_join_mcast *cmd, int out_len) 1379 { 1380 struct rdma_ucm_create_id_resp resp; 1381 struct ucma_context *ctx; 1382 struct ucma_multicast *mc; 1383 struct sockaddr *addr; 1384 int ret; 1385 u8 join_state; 1386 1387 if (out_len < sizeof(resp)) 1388 return -ENOSPC; 1389 1390 addr = (struct sockaddr *) &cmd->addr; 1391 if (cmd->addr_size != rdma_addr_size(addr)) 1392 return -EINVAL; 1393 1394 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) 1395 join_state = BIT(FULLMEMBER_JOIN); 1396 else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER) 1397 join_state = BIT(SENDONLY_FULLMEMBER_JOIN); 1398 else 1399 return -EINVAL; 1400 1401 ctx = ucma_get_ctx_dev(file, cmd->id); 1402 if (IS_ERR(ctx)) 1403 return PTR_ERR(ctx); 1404 1405 mutex_lock(&file->mut); 1406 mc = ucma_alloc_multicast(ctx); 1407 if (!mc) { 1408 ret = -ENOMEM; 1409 goto err1; 1410 } 1411 mc->join_state = join_state; 1412 mc->uid = cmd->uid; 1413 memcpy(&mc->addr, addr, cmd->addr_size); 1414 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, 1415 join_state, mc); 1416 if (ret) 1417 goto err2; 1418 1419 resp.id = mc->id; 1420 if (copy_to_user(u64_to_user_ptr(cmd->response), 1421 &resp, sizeof(resp))) { 1422 ret = -EFAULT; 1423 goto err3; 1424 } 1425 1426 mutex_lock(&mut); 1427 idr_replace(&multicast_idr, mc, mc->id); 1428 mutex_unlock(&mut); 1429 1430 mutex_unlock(&file->mut); 1431 ucma_put_ctx(ctx); 1432 return 0; 1433 1434 err3: 1435 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); 1436 ucma_cleanup_mc_events(mc); 1437 err2: 1438 mutex_lock(&mut); 1439 idr_remove(&multicast_idr, mc->id); 1440 mutex_unlock(&mut); 1441 list_del(&mc->list); 1442 kfree(mc); 1443 err1: 1444 mutex_unlock(&file->mut); 1445 ucma_put_ctx(ctx); 1446 return ret; 1447 } 1448 1449 static ssize_t ucma_join_ip_multicast(struct ucma_file *file, 1450 const char __user *inbuf, 1451 int in_len, int out_len) 1452 { 1453 struct rdma_ucm_join_ip_mcast cmd; 1454 struct rdma_ucm_join_mcast join_cmd; 1455 1456 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1457 return -EFAULT; 1458 1459 join_cmd.response = cmd.response; 1460 join_cmd.uid = cmd.uid; 1461 join_cmd.id = cmd.id; 1462 join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); 1463 if (!join_cmd.addr_size) 1464 return -EINVAL; 1465 1466 
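	/* The legacy IP-based join command carries no join flags, so request
	 * a full-member join; send-only joins are only reachable through
	 * RDMA_USER_CM_CMD_JOIN_MCAST.
	 */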
join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; 1467 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); 1468 1469 return ucma_process_join(file, &join_cmd, out_len); 1470 } 1471 1472 static ssize_t ucma_join_multicast(struct ucma_file *file, 1473 const char __user *inbuf, 1474 int in_len, int out_len) 1475 { 1476 struct rdma_ucm_join_mcast cmd; 1477 1478 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1479 return -EFAULT; 1480 1481 if (!rdma_addr_size_kss(&cmd.addr)) 1482 return -EINVAL; 1483 1484 return ucma_process_join(file, &cmd, out_len); 1485 } 1486 1487 static ssize_t ucma_leave_multicast(struct ucma_file *file, 1488 const char __user *inbuf, 1489 int in_len, int out_len) 1490 { 1491 struct rdma_ucm_destroy_id cmd; 1492 struct rdma_ucm_destroy_id_resp resp; 1493 struct ucma_multicast *mc; 1494 int ret = 0; 1495 1496 if (out_len < sizeof(resp)) 1497 return -ENOSPC; 1498 1499 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1500 return -EFAULT; 1501 1502 mutex_lock(&mut); 1503 mc = idr_find(&multicast_idr, cmd.id); 1504 if (!mc) 1505 mc = ERR_PTR(-ENOENT); 1506 else if (mc->ctx->file != file) 1507 mc = ERR_PTR(-EINVAL); 1508 else if (!atomic_inc_not_zero(&mc->ctx->ref)) 1509 mc = ERR_PTR(-ENXIO); 1510 else 1511 idr_remove(&multicast_idr, mc->id); 1512 mutex_unlock(&mut); 1513 1514 if (IS_ERR(mc)) { 1515 ret = PTR_ERR(mc); 1516 goto out; 1517 } 1518 1519 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); 1520 mutex_lock(&mc->ctx->file->mut); 1521 ucma_cleanup_mc_events(mc); 1522 list_del(&mc->list); 1523 mutex_unlock(&mc->ctx->file->mut); 1524 1525 ucma_put_ctx(mc->ctx); 1526 resp.events_reported = mc->events_reported; 1527 kfree(mc); 1528 1529 if (copy_to_user(u64_to_user_ptr(cmd.response), 1530 &resp, sizeof(resp))) 1531 ret = -EFAULT; 1532 out: 1533 return ret; 1534 } 1535 1536 static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) 1537 { 1538 /* Acquire mutex's based on pointer comparison to prevent deadlock. */ 1539 if (file1 < file2) { 1540 mutex_lock(&file1->mut); 1541 mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); 1542 } else { 1543 mutex_lock(&file2->mut); 1544 mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); 1545 } 1546 } 1547 1548 static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2) 1549 { 1550 if (file1 < file2) { 1551 mutex_unlock(&file2->mut); 1552 mutex_unlock(&file1->mut); 1553 } else { 1554 mutex_unlock(&file1->mut); 1555 mutex_unlock(&file2->mut); 1556 } 1557 } 1558 1559 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) 1560 { 1561 struct ucma_event *uevent, *tmp; 1562 1563 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) 1564 if (uevent->ctx == ctx) 1565 list_move_tail(&uevent->list, &file->event_list); 1566 } 1567 1568 static ssize_t ucma_migrate_id(struct ucma_file *new_file, 1569 const char __user *inbuf, 1570 int in_len, int out_len) 1571 { 1572 struct rdma_ucm_migrate_id cmd; 1573 struct rdma_ucm_migrate_resp resp; 1574 struct ucma_context *ctx; 1575 struct fd f; 1576 struct ucma_file *cur_file; 1577 int ret = 0; 1578 1579 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1580 return -EFAULT; 1581 1582 /* Get current fd to protect against it being closed */ 1583 f = fdget(cmd.fd); 1584 if (!f.file) 1585 return -ENOENT; 1586 if (f.file->f_op != &ucma_fops) { 1587 ret = -EINVAL; 1588 goto file_put; 1589 } 1590 1591 /* Validate current fd and prevent destruction of id. 
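	 * ucma_get_ctx() takes a reference on the context that is dropped by
	 * ucma_put_ctx() once migration (or the error path) completes.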
*/ 1592 ctx = ucma_get_ctx(f.file->private_data, cmd.id); 1593 if (IS_ERR(ctx)) { 1594 ret = PTR_ERR(ctx); 1595 goto file_put; 1596 } 1597 1598 cur_file = ctx->file; 1599 if (cur_file == new_file) { 1600 resp.events_reported = ctx->events_reported; 1601 goto response; 1602 } 1603 1604 /* 1605 * Migrate events between fd's, maintaining order, and avoiding new 1606 * events being added before existing events. 1607 */ 1608 ucma_lock_files(cur_file, new_file); 1609 mutex_lock(&mut); 1610 1611 list_move_tail(&ctx->list, &new_file->ctx_list); 1612 ucma_move_events(ctx, new_file); 1613 ctx->file = new_file; 1614 resp.events_reported = ctx->events_reported; 1615 1616 mutex_unlock(&mut); 1617 ucma_unlock_files(cur_file, new_file); 1618 1619 response: 1620 if (copy_to_user(u64_to_user_ptr(cmd.response), 1621 &resp, sizeof(resp))) 1622 ret = -EFAULT; 1623 1624 ucma_put_ctx(ctx); 1625 file_put: 1626 fdput(f); 1627 return ret; 1628 } 1629 1630 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, 1631 const char __user *inbuf, 1632 int in_len, int out_len) = { 1633 [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, 1634 [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, 1635 [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, 1636 [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, 1637 [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, 1638 [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, 1639 [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, 1640 [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, 1641 [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, 1642 [RDMA_USER_CM_CMD_REJECT] = ucma_reject, 1643 [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, 1644 [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, 1645 [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, 1646 [RDMA_USER_CM_CMD_GET_OPTION] = NULL, 1647 [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, 1648 [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, 1649 [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, 1650 [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, 1651 [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, 1652 [RDMA_USER_CM_CMD_QUERY] = ucma_query, 1653 [RDMA_USER_CM_CMD_BIND] = ucma_bind, 1654 [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, 1655 [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast 1656 }; 1657 1658 static ssize_t ucma_write(struct file *filp, const char __user *buf, 1659 size_t len, loff_t *pos) 1660 { 1661 struct ucma_file *file = filp->private_data; 1662 struct rdma_ucm_cmd_hdr hdr; 1663 ssize_t ret; 1664 1665 if (!ib_safe_file_access(filp)) { 1666 pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", 1667 task_tgid_vnr(current), current->comm); 1668 return -EACCES; 1669 } 1670 1671 if (len < sizeof(hdr)) 1672 return -EINVAL; 1673 1674 if (copy_from_user(&hdr, buf, sizeof(hdr))) 1675 return -EFAULT; 1676 1677 if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) 1678 return -EINVAL; 1679 1680 if (hdr.in + sizeof(hdr) > len) 1681 return -EINVAL; 1682 1683 if (!ucma_cmd_table[hdr.cmd]) 1684 return -ENOSYS; 1685 1686 ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); 1687 if (!ret) 1688 ret = len; 1689 1690 return ret; 1691 } 1692 1693 static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait) 1694 { 1695 struct ucma_file *file = filp->private_data; 1696 __poll_t mask = 0; 1697 1698 poll_wait(filp, &file->poll_wait, wait); 1699 1700 if (!list_empty(&file->event_list)) 1701 mask = EPOLLIN | EPOLLRDNORM; 1702 1703 return mask; 1704 } 1705 1706 /* 1707 
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* At this point ctx has been marked as destroying and the
		 * workqueue has been flushed, so no in-flight handler can
		 * queue another closing work item.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);