/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away, in the driver's .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * we remove the attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private. Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
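
/*
 * A minimal sketch of how a driver wires these helpers up, for a
 * hypothetical driver "foo" (all foo_* names are illustrative, not part of
 * this file):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= foo_gem_prime_vmap,
 *		.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *	};
 *
 * With this wiring the self-importing case described above is handled
 * automatically by drm_gem_prime_import.
 */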

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}
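
/*
 * The map_dma_buf path below relies on the driver's gem_prime_get_sg_table
 * callback. A minimal sketch of one, assuming a hypothetical driver "foo"
 * that keeps a pinned page array in its buffer object (foo_bo and to_foo_bo
 * are assumptions):
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *		int npages = obj->size >> PAGE_SHIFT;
 *
 *		return drm_prime_pages_to_sg(bo->pages, npages);
 *	}
 */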

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
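
/*
 * The vmap/vunmap entries above just forward to the driver. A minimal
 * sketch of the driver side, assuming a hypothetical driver "foo" that
 * keeps a page array (vmap/vunmap come from linux/vmalloc.h):
 *
 *	static void *foo_gem_prime_vmap(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return vmap(bo->pages, obj->size >> PAGE_SHIFT,
 *			    VM_MAP, PAGE_KERNEL);
 *	}
 *
 *	static void foo_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 *	{
 *		vunmap(vaddr);
 *	}
 */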

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &drm_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
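
/*
 * Drivers that need extra checks before exporting can wrap the helper
 * rather than plugging it into gem_prime_export directly. A hedged sketch
 * for a hypothetical driver "foo" (the no_share flag is an assumption):
 *
 *	static struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
 *						    struct drm_gem_object *obj,
 *						    int flags)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (bo->no_share)
 *			return ERR_PTR(-EPERM);
 *
 *		return drm_gem_prime_export(dev, obj, flags);
 *	}
 */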

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the caller's handle is now used by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the gem_prime_export
 * driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
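
/*
 * The export path above and the import path below are typically reached
 * from userspace via the libdrm wrappers drmPrimeHandleToFD and
 * drmPrimeFDToHandle. A hedged sketch of sharing a buffer between two open
 * devices (error handling elided):
 *
 *	int prime_fd;
 *	uint32_t imported_handle;
 *
 *	drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd);
 *	drmPrimeFDToHandle(importer_fd, prime_fd, &imported_handle);
 *	close(prime_fd);
 */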

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
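
/*
 * The import path above hands the mapped sg table to the driver's
 * gem_prime_import_sg_table callback. A minimal sketch for a hypothetical
 * driver "foo" (foo_bo_create_from_sg is an assumption):
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		struct foo_bo *bo;
 *
 *		bo = foo_bo_create_from_sg(dev, attach->dmabuf->size, sgt);
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *
 *		return &bo->base;
 *	}
 */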

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages;
 * the driver is responsible for mapping the pages into the
 * importer's address space for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
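
/*
 * A minimal sketch of how an importer can use the helper above to build a
 * flat page array from an imported sg table (npages and the surrounding
 * buffer object are assumptions):
 *
 *	int npages = obj->size >> PAGE_SHIFT;
 *	struct page **pages;
 *	int ret;
 *
 *	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
 */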

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
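
/*
 * A hedged sketch of where drm_prime_gem_destroy fits in a hypothetical
 * driver's free-object path (foo_bo, to_foo_bo and foo_bo_free_pages are
 * illustrative assumptions):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		else
 *			foo_bo_free_pages(bo);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */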