/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UVERBS_MAJOR		    = 231,
	IB_UVERBS_BASE_MINOR	    = 192,
	IB_UVERBS_MAX_DEVICES	    = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR   = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);

int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	put_device(&file->device->dev);
	kfree(file);
}

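/*
 * Common read() implementation for both the async event and the completion
 * event file descriptors.  Blocks until an event is queued unless the fd is
 * O_NONBLOCK (then -EAGAIN), returns -ERESTARTSYS if interrupted by a signal
 * and -EIO if the device was disassociated while waiting.  A buffer smaller
 * than one event (eventsz) gets -EINVAL; otherwise exactly one event is
 * copied to userspace and its size is returned.
 */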
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					     !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists, set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}

const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll	 = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync	 = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll	 = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync	 = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

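/*
 * Queue one asynchronous event on the file's async event queue and wake up
 * readers and pollers.  When @obj_list and @counter are non-NULL the entry is
 * also linked to the owning uobject, so undelivered events can be freed on
 * object destruction and *counter is incremented as events are delivered
 * (see ib_uverbs_event_read() and ib_uverbs_release_uevent()).
 */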
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter		     = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed   = 0;
	ev_queue->async_queue = NULL;
}

struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At this point the async file is fully set up */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}

static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}
	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}

static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	bundle.ufile = file;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);

	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}

/*
 * Each time we map IO memory into user space this keeps track of the mapping.
 * When the device is hot-unplugged we 'zap' the mmaps in user space to point
 * to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;
	struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;

static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);
	kfree(priv);
}

static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
};

static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}

/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR memory
 * to userspace.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);

/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);

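/*
 * Illustrative sketch only (compiled out, not part of this driver): how a
 * provider's ops.mmap handler might forward a doorbell/BAR page to userspace
 * with rdma_user_mmap_io().  The function name example_drv_mmap, the
 * bar_start value and the pgoff-based offset are hypothetical assumptions
 * made purely for illustration; real drivers derive the physical address
 * from their own hardware layout.
 */
#if 0
static int example_drv_mmap(struct ib_ucontext *ucontext,
			    struct vm_area_struct *vma)
{
	/* Hypothetical base physical address of the region to expose. */
	phys_addr_t bar_start = 0xfe000000;
	unsigned long pfn = (bar_start >> PAGE_SHIFT) + vma->vm_pgoff;

	/* IO memory is normally mapped non-cached or write-combining. */
	return rdma_user_mmap_io(ucontext, vma, pfn,
				 vma->vm_end - vma->vm_start,
				 pgprot_noncached(vma->vm_page_prot));
}
#endif
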
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		if (!list_empty(&ufile->umaps)) {
			mm = list_first_entry(&ufile->umaps,
					      struct rdma_umap_priv, list)
				     ->vma->vm_mm;
			mmget(mm);
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it is used
		 * within the vma_ops callbacks, so we have to clean the list
		 * one mm at a time to get the lock ordering right. Typically
		 * there will only be one mm, so no big deal.
		 */
		down_write(&mm->mmap_sem);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		}
		mutex_unlock(&ufile->umap_lock);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately return -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}

static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap	 = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}

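/*
 * Char device minors: the first IB_UVERBS_NUM_FIXED_MINOR (32) devices use
 * the statically registered region starting at IB_UVERBS_BASE_DEV (major 231,
 * minor 192); devices beyond that get minors from the dynamically allocated
 * dynamic_uverbs_dev region.  The per-device number comes from uverbs_ida.
 */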
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->ops.alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0)
		goto err;
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	if (ib_uverbs_create_uapi(device, uverbs_dev))
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
	return;
}

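/*
 * Hot-unplug path for devices that support disassociation: pending commands
 * are terminated, every open ufile receives an IB_EVENT_DEVICE_FATAL event
 * and has its HW objects destroyed, and the async event queues are marked
 * closed so blocked readers and pollers wake up; later accesses see -EIO.
 */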
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);
		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device = NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}

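/*
 * Module setup: uverbs_devnode() places the char devices under
 * /dev/infiniband/ with mode 0666, and ib_uverbs_init() registers the fixed
 * chrdev region, allocates the dynamic region, creates the infiniband_verbs
 * class and registers uverbs_client with the IB core.
 */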
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);