/*
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <drm/drm_client.h>
#include <drm/drm_file.h>
#include <drm/drmP.h>

#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are
 * drm_open(), drm_read(), drm_ioctl() and, if CONFIG_COMPAT is enabled,
 * drm_compat_ioctl(). Note that drm_compat_ioctl() will be NULL if
 * CONFIG_COMPAT=n, so there's no need to sprinkle #ifdefs into the code.
 * Drivers which implement private ioctls that require 32/64 bit
 * compatibility support must provide their own &file_operations.compat_ioctl
 * handler that processes private ioctls and calls drm_compat_ioctl() for
 * core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used by the KMS API to
 * send vblank and page flip completion events, but drivers can also use them
 * for their own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function; modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap(),
 * and for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall
 * the following is an example &file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and
 * for CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make
 * this simpler.
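 *
 * For a plain GEM based driver the example above therefore collapses to a
 * single line (a sketch, assuming a hypothetical driver called "example")::
 *
 *     DEFINE_DRM_GEM_FOPS(example_drm_fops);
 *
 * which defines a &struct file_operations with essentially the operations
 * listed above.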
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 */

static int drm_open_helper(struct file *filp, struct drm_minor *minor);

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context
 * and can be used by the caller freely. Note that the context keeps a
 * pointer to @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
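
/*
 * A minimal sketch of the expected usage pattern, mirroring what
 * drm_open_helper() and drm_close_helper() below do for character-device
 * opens (the caller here is hypothetical):
 *
 *     struct drm_file *file = drm_file_alloc(minor);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *
 *     mutex_lock(&dev->filelist_mutex);
 *     list_add(&file->lhead, &dev->filelist);
 *     mutex_unlock(&dev->filelist_mutex);
 *
 *     ...
 *
 *     mutex_lock(&dev->filelist_mutex);
 *     list_del(&file->lhead);
 *     mutex_unlock(&dev->filelist_mutex);
 *     drm_file_free(file);
 */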

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  dev->open_count);

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

static void drm_close_helper(struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
}

static int drm_setup(struct drm_device *dev)
{
	int ret;

	if (dev->driver->firstopen &&
	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;

	DRM_DEBUG("\n");
	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open
 * method. It looks up the correct DRM device and instantiates all the
 * per-file resources for it. It also calls the &drm_driver.open driver
 * callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it into the doubly linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}

void drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls
 * the &drm_driver.postclose driver callback. If this is the last open file
 * for the DRM device it also proceeds to call the &drm_driver.lastclose
 * driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	drm_close_helper(filp);

	if (!--dev->open_count)
		drm_lastclose(dev);

	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion
 * this means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers
 * also must set &file_operations.llseek to no_llseek. Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress.
 * Since the maximum event space is currently 4K it's recommended to just use
 * that for safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned int length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
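
/*
 * A minimal userspace-side sketch of draining events with a buffer sized to
 * the full 4k event space, so that any single event fits (drm_fd is a
 * hypothetical open DRM file descriptor):
 *
 *     char buf[4096];
 *     ssize_t len = read(drm_fd, buf, sizeof(buf));
 *     size_t off = 0;
 *
 *     while (len > 0 && off + sizeof(struct drm_event) <= (size_t)len) {
 *             struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *             if (e->type == DRM_EVENT_FLIP_COMPLETE)
 *                     ; // handle page flip completion
 *             off += e->length;
 *     }
 */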

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion
 * this means all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before
 * queuing up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent
 * out using drm_send_event() or drm_send_event_locked() to signal completion
 * of the asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before
 * queuing up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent
 * out using drm_send_event() or drm_send_event_locked() to signal completion
 * of the asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
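
/*
 * A minimal sketch of the full driver-side event flow, with hypothetical
 * names (struct example_flip, example_submit()); note that the tracking
 * structure is embedded as the first member and the whole thing is
 * kmalloc'd, so the core's kfree() on the base pointer frees the embedding
 * structure too:
 *
 *     struct example_flip {
 *             struct drm_pending_event base;
 *             struct drm_event_vblank event;
 *     };
 *
 *     struct example_flip *flip = kzalloc(sizeof(*flip), GFP_KERNEL);
 *     int ret;
 *
 *     if (!flip)
 *             return -ENOMEM;
 *
 *     flip->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 *     flip->event.base.length = sizeof(flip->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &flip->base,
 *                                  &flip->event.base);
 *     if (ret) {
 *             kfree(flip);
 *             return ret;
 *     }
 *
 *     if (example_submit(flip)) {
 *             // nothing was queued: abort and free the reserved event
 *             drm_event_cancel_free(dev, &flip->base);
 *             return -EIO;
 *     }
 *
 *     // later, upon completion of the asynchronous work:
 *     drm_send_event(dev, &flip->base);
 */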

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with
 * drm_event_reserve_init(), to its associated userspace DRM file. Callers
 * must already hold &drm_device.event_lock, see drm_send_event() for the
 * unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when
 * the corresponding DRM file is closed. Drivers need not worry about whether
 * the DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with
 * drm_event_reserve_init(), to its associated userspace DRM file. This
 * function acquires &drm_device.event_lock, see drm_send_event_locked() for
 * callers which already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when
 * the corresponding DRM file is closed. Drivers need not worry about whether
 * the DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
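
/*
 * A minimal sketch of completing events from an interrupt handler which
 * already holds &drm_device.event_lock; the fetch helper is hypothetical and
 * stands in for driver-specific hardware bookkeeping:
 *
 *     static irqreturn_t example_irq_handler(int irq, void *arg)
 *     {
 *             struct drm_device *dev = arg;
 *             struct drm_pending_event *e;
 *
 *             spin_lock(&dev->event_lock);
 *             while ((e = example_fetch_completed_event(dev)))
 *                     drm_send_event_locked(dev, e);
 *             spin_unlock(&dev->event_lock);
 *
 *             return IRQ_HANDLED;
 *     }
 */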