/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drm_prime.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference needs to be released when the
 * final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers,
 * the dma_buf should be exported using drm_gem_dmabuf_export() and
 * then released by drm_gem_dmabuf_release().
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the attachment
 * in the GEM object. When the imported object is destroyed, we
 * destroy this attachment and drop the reference to the dma_buf.
 *
 * When all the references to the &dma_buf are dropped, i.e. when
 * userspace has closed both handles to the imported GEM object (through the
 * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
 * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
 * are also gone, then the dma_buf gets destroyed. This can also happen as a
 * part of the clean up procedure in the drm_release() function if userspace
 * fails to properly clean up. Note that both the kernel and userspace (by
 * keeping the PRIME file descriptors open) can hold references onto a
 * &dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that use the
 * drm_gem_prime_{import,export} helpers.
 *
 * GEM struct &dma_buf_ops symbols are now exported. They can be reused by
 * drivers which implement the GEM interface.
 */
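
/*
 * For illustration only: a driver built on these helpers usually just wires
 * them up in its &drm_driver. This is a minimal sketch rather than code from
 * any specific driver; the foo_* callbacks are hypothetical names for the
 * driver-specific hooks described in the "PRIME Helpers" section below.
 *
 *     static struct drm_driver foo_driver = {
 *             .driver_features           = DRIVER_GEM | DRIVER_PRIME,
 *             .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *             .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *             .gem_prime_export          = drm_gem_prime_export,
 *             .gem_prime_import          = drm_gem_prime_import,
 *             .gem_prime_get_sg_table    = foo_gem_prime_get_sg_table,
 *             .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *     };
 */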

struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;

        struct rb_node dmabuf_rb;
        struct rb_node handle_rb;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;
        struct rb_node **p, *rb;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;

        rb = NULL;
        p = &prime_fpriv->dmabufs.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (dma_buf > pos->dma_buf)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->dmabuf_rb, rb, p);
        rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

        rb = NULL;
        p = &prime_fpriv->handles.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (handle > pos->handle)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->handle_rb, rb, p);
        rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->handles.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (member->handle == handle)
                        return member->dma_buf;
                else if (member->handle < handle)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }

        return -ENOENT;
}

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
 * device specific attachment. This can be used as the &dma_buf_ops.attach
 * callback.
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
 * callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
                        struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (prime_attach) {
                struct sg_table *sgt = prime_attach->sgt;

                if (sgt) {
                        if (prime_attach->dir != DMA_NONE)
                                dma_unmap_sg_attrs(attach->dev, sgt->sgl,
                                                   sgt->nents,
                                                   prime_attach->dir,
                                                   DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(prime_attach);
                attach->priv = NULL;
        }

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
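
/*
 * For illustration only: &drm_driver.gem_prime_pin and
 * &drm_driver.gem_prime_unpin are the hooks invoked by the attach/detach
 * helpers above. A minimal sketch, assuming hypothetical foo_get_pages() /
 * foo_put_pages() helpers that keep the object's backing storage resident
 * while the buffer is attached:
 *
 *     static int foo_gem_prime_pin(struct drm_gem_object *obj)
 *     {
 *             // keep the pages in place so the exported sg table stays valid
 *             return foo_get_pages(to_foo_obj(obj));
 *     }
 *
 *     static void foo_gem_prime_unpin(struct drm_gem_object *obj)
 *     {
 *             foo_put_pages(to_foo_obj(obj));
 *     }
 */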

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

                        dma_buf_put(dma_buf);
                        kfree(member);
                        return;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }
}

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback.
 *
 * Returns the sg_table containing the scatterlist, or an ERR_PTR on error.
 * May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                     enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
                                      DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
 * used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           struct sg_table *sgt,
                           enum dma_data_direction dir)
{
        /* nothing to be done here */
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dma_buf;

        dma_buf = dma_buf_export(exp_info);
        if (IS_ERR(dma_buf))
                return dma_buf;

        drm_dev_get(dev);
        drm_gem_object_get(exp_info->priv);

        return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        /* drop the reference the export fd holds */
        drm_gem_object_put_unlocked(obj);

        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
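
/*
 * For illustration only: a driver that exports with its own &dma_buf_ops
 * (for example to provide a custom mmap) but goes through
 * drm_gem_dmabuf_export() must still use drm_gem_dmabuf_release() as the
 * release callback, so the references taken at export time are dropped
 * again. A minimal sketch; foo_dmabuf_mmap is a hypothetical callback:
 *
 *     static const struct dma_buf_ops foo_dmabuf_ops = {
 *             .attach = drm_gem_map_attach,
 *             .detach = drm_gem_map_detach,
 *             .map_dma_buf = drm_gem_map_dma_buf,
 *             .unmap_dma_buf = drm_gem_unmap_dma_buf,
 *             // must pair with drm_gem_dmabuf_export()
 *             .release = drm_gem_dmabuf_release,
 *             .mmap = foo_dmabuf_mmap,
 *     };
 */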

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback.
 *
 * Returns the kernel virtual address.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_prime_vmap)
                return dev->driver->gem_prime_vmap(obj);
        else
                return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_prime_vunmap)
                dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
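
/*
 * For illustration only: a hypothetical driver that backs its GEM objects
 * with a plain page array could implement the export callbacks listed above
 * roughly as follows. foo_obj and its pages member are assumed names;
 * drm_prime_pages_to_sg() is the helper defined later in this file.
 *
 *     static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *     {
 *             struct foo_obj *bo = to_foo_obj(obj);
 *
 *             return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
 *     }
 *
 *     static void *foo_gem_prime_vmap(struct drm_gem_object *obj)
 *     {
 *             struct foo_obj *bo = to_foo_obj(obj);
 *
 *             return vmap(bo->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
 *     }
 */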

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_driver.gem_prime_export callback for
 * GEM drivers using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj,
                                     int flags)
{
        struct dma_buf_export_info exp_info = {
                .exp_name = KBUILD_MODNAME, /* white lie for debug */
                .owner = dev->driver->fops->owner,
                .ops = &drm_gem_prime_dmabuf_ops,
                .size = obj->size,
                .flags = flags,
                .priv = obj,
        };

        if (dev->driver->gem_prime_res_obj)
                exp_info.resv = dev->driver->gem_prime_res_obj(obj);

        return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }

out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't fail to remove this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;

out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_put_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                struct dma_buf *dma_buf,
                                                struct device *attach_dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        attach = dma_buf_attach(dma_buf, attach_dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
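
/*
 * For illustration only: a driver whose DMA-capable device differs from
 * &drm_device.dev (e.g. a component/platform sub-device) can route its
 * &drm_driver.gem_prime_import through drm_gem_prime_import_dev(). A minimal
 * sketch; foo_device and its dma_dev member are hypothetical:
 *
 *     static struct drm_gem_object *
 *     foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *     {
 *             struct foo_device *foo = dev->dev_private;
 *
 *             // attach with the device that actually performs the DMA
 *             return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *     }
 */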

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the &drm_driver.gem_prime_import callback for
 * GEM drivers using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                                          dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dma_buf, *handle);
        mutex_unlock(&file_priv->prime.lock);
        if (ret)
                goto fail;

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
        dma_buf_put(dma_buf);
        return ret;

out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        mutex_unlock(&file_priv->prime.lock);
        dma_buf_put(dma_buf);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EOPNOTSUPP;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EOPNOTSUPP;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}
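
/*
 * For illustration only: from userspace these two ioctls are reached through
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE. A minimal
 * sketch of the export side (error handling omitted):
 *
 *     struct drm_prime_handle args = {
 *             .handle = handle,
 *             .flags = DRM_CLOEXEC,
 *     };
 *
 *     ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *     // args.fd now refers to the exported dma-buf
 */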

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_entries)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len, index;
        dma_addr_t addr;

        index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(index >= max_entries))
                                return -1;
                        if (pages)
                                pages[index] = page;
                        if (addrs)
                                addrs[index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        mutex_init(&prime_fpriv->lock);
        prime_fpriv->dmabufs = RB_ROOT;
        prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}