/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in the handle_to_fd ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference needs to be released when the
 * final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers,
 * the dma_buf should be exported using drm_gem_dmabuf_export() and
 * then released by drm_gem_dmabuf_release().
 *
 * On import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is
 * destroyed, the attachment is destroyed as well, and we drop the
 * reference to the dma_buf.
 *
 * When all the references to the &dma_buf are dropped, i.e. when
 * userspace has closed both handles to the imported GEM object (through the
 * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
 * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
 * are also gone, then the dma_buf gets destroyed. This can also happen as a
 * part of the clean up procedure in the drm_release() function if userspace
 * fails to properly clean up. Note that both the kernel and userspace (by
 * keeping the PRIME file descriptors open) can hold references onto a
 * &dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that use the
 * drm_gem_prime_{import,export} helpers.
 *
 * GEM struct &dma_buf_ops symbols are now exported. They can be reused by
 * drivers that implement the GEM interface.
 */

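/*
 * Illustrative sketch (not part of the original file; "foo_driver" is a
 * made-up name): a GEM driver that wants the reference chain described above
 * to be managed entirely by the PRIME helpers in this file typically just
 * wires them up in its &drm_driver:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		...
 *	};
 */
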
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback.
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Cleans up a &dma_buf_attachment. This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback.
 *
 * Returns the sg_table containing the scatterlist to be returned; returns
 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (obj->funcs)
		sgt = obj->funcs->get_sg_table(obj);
	else
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(-ENOMEM);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback.
 *
 * Returns the kernel virtual address.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	void *vaddr;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr))
		vaddr = NULL;

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions drm_gem_prime_export() and
 * drm_gem_prime_import(). These functions implement dma-buf support in terms
 * of six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table: produce a GEM object from another driver's
 *    scatter/gather table
 */

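/*
 * Illustrative sketch (not from the original file; the "foo_*" functions are
 * placeholder driver code): a driver relying on the callbacks listed above
 * typically provides the lower-level hooks and reuses the generic helpers for
 * everything else:
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_prime_export		= drm_gem_prime_export,
 *		.gem_prime_import		= drm_gem_prime_import,
 *		.gem_prime_pin			= foo_gem_prime_pin,
 *		.gem_prime_get_sg_table		= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table	= foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap			= foo_gem_prime_vmap,
 *		.gem_prime_vunmap		= foo_gem_prime_vunmap,
 *		.gem_prime_mmap			= drm_gem_prime_mmap,
 *		...
 *	};
 */
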
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	else
		dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from a GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than dev->dev for
 * attaching via dma_buf.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

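/*
 * Illustrative sketch (not from the original file; the "foo" names are
 * placeholders): a driver whose DMA engine is a different struct device than
 * dev->dev can wrap drm_gem_prime_import_dev() in its own
 * &drm_driver.gem_prime_import callback:
 *
 *	struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
 *						     struct dma_buf *dma_buf)
 *	{
 *		struct foo_device *foo = dev->dev_private;
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *	}
 */
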
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

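/*
 * Illustrative sketch (userspace side, not part of this file; error handling
 * omitted): drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle()
 * back the PRIME ioctls handled below, which userspace uses roughly like this
 * to share a buffer between two DRM file descriptors:
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,		(GEM handle on the exporting fd)
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 *	struct drm_prime_handle import = { .fd = args.fd };
 *	ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *	(import.handle now names the same buffer on the importing fd)
 */
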
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EOPNOTSUPP;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
					       args->handle, args->flags,
					       &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EOPNOTSUPP;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
					       args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

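/*
 * Illustrative sketch (not from the original file; the "foo" object layout
 * with "pages"/"num_pages" fields is assumed): a page-backed driver commonly
 * implements &drm_driver.gem_prime_get_sg_table directly on top of
 * drm_prime_pages_to_sg():
 *
 *	struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *		return drm_prime_pages_to_sg(foo_obj->pages, foo_obj->num_pages);
 *	}
 */
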
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
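
/*
 * Illustrative sketch (not from the original file; all "foo_*" names are
 * placeholders): an importing driver typically builds its own object around
 * the sg table in &drm_driver.gem_prime_import_sg_table, optionally expands
 * it with drm_prime_sg_to_page_addr_arrays(), and undoes the import with
 * drm_prime_gem_destroy() on the free path:
 *
 *	struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		struct foo_gem_object *foo_obj;
 *		int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *
 *		foo_obj = foo_gem_object_create(dev, attach->dmabuf->size);
 *		if (IS_ERR(foo_obj))
 *			return ERR_CAST(foo_obj);
 *
 *		if (drm_prime_sg_to_page_addr_arrays(sgt, foo_obj->pages,
 *						     NULL, npages)) {
 *			foo_gem_object_free(foo_obj);
 *			return ERR_PTR(-EINVAL);
 *		}
 *
 *		foo_obj->sgt = sgt;
 *		return &foo_obj->base;
 *	}
 *
 *	void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, foo_obj->sgt);
 *		...
 *	}
 */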