/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is later
 * destroyed, we remove the attachment and drop the reference to the
 * dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private. Prime will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */
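
/*
 * For illustration, the self-import check performed on behalf of
 * helper-based drivers (see drm_gem_prime_import() below) boils down to:
 *
 *      if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 *              struct drm_gem_object *obj = dma_buf->priv;
 *
 *              if (obj->dev == dev) {
 *                      drm_gem_object_reference(obj);
 *                      return obj;
 *              }
 *      }
 */
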
struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

static void drm_prime_remove_buf_handle_locked(
                struct drm_prime_file_private *prime_fpriv,
                struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        mutex_lock(&obj->dev->struct_mutex);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        mutex_unlock(&obj->dev->struct_mutex);
        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here: the cached mapping is released on detach */
}
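
/*
 * A minimal sketch of the ->gem_prime_get_sg_table() callback a driver
 * could plug in behind the ops above. The "my_*" names are made up for
 * illustration; a driver whose objects are backed by a plain page array
 * can lean on drm_prime_pages_to_sg(), defined later in this file:
 *
 *      static struct sg_table *
 *      my_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *      {
 *              struct my_gem_object *my_obj = to_my_gem_object(obj);
 *
 *              return drm_prime_pages_to_sg(my_obj->pages,
 *                                           obj->size >> PAGE_SHIFT);
 *      }
 */
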
static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;

        if (obj->export_dma_buf == dma_buf) {
                /* drop the reference the export fd holds */
                obj->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(obj);
        }
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
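
/*
 * For illustration only, a hypothetical driver wiring the helpers into its
 * struct drm_driver; the "my_*" callbacks stand in for made-up driver code
 * such as the sketches in this file:
 *
 *      static struct drm_driver my_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_PRIME,
 *              .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export = drm_gem_prime_export,
 *              .gem_prime_import = drm_gem_prime_import,
 *              .gem_prime_get_sg_table = my_gem_prime_get_sg_table,
 *              .gem_prime_import_sg_table = my_gem_prime_import_sg_table,
 *              .gem_prime_vmap = my_gem_prime_vmap,
 *              .gem_prime_vunmap = my_gem_prime_vunmap,
 *      };
 */
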
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                int *prime_fd)
{
        struct drm_gem_object *obj;
        struct dma_buf *buf;
        int ret = 0;
        struct dma_buf *dmabuf;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
                return -ENOENT;

        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                goto out_have_obj;
        }

        if (obj->export_dma_buf) {
                dmabuf = obj->export_dma_buf;
                goto out_have_obj;
        }

        buf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(buf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(buf);
                goto out;
        }
        obj->export_dma_buf = buf;

        /* if we've exported this buffer then cheat and add it to the import
         * list so we get the correct handle back
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       obj->export_dma_buf, handle);
        if (ret)
                goto fail_put_dmabuf;

        ret = dma_buf_fd(buf, flags);
        if (ret < 0)
                goto fail_rm_handle;

        *prime_fd = ret;
        mutex_unlock(&file_priv->prime.lock);
        return 0;

out_have_obj:
        get_dma_buf(dmabuf);
        ret = dma_buf_fd(dmabuf, flags);
        if (ret < 0) {
                dma_buf_put(dmabuf);
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_rm_handle:
        drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
fail_put_dmabuf:
        /* clear this so drm_gem_dmabuf_release() won't drop the GEM
         * reference a second time; we drop it ourselves at out: below
         */
        obj->export_dma_buf = NULL;
        dma_buf_put(buf);
out:
        drm_gem_object_unreference_unlocked(obj);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
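
/*
 * For illustration, the userspace side of the round trip this enables,
 * using libdrm's drmPrimeHandleToFD()/drmPrimeFDToHandle() wrappers around
 * the two PRIME ioctls, looks roughly like:
 *
 *      int prime_fd;
 *      uint32_t handle_out;
 *
 *      drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd);
 *      drmPrimeFDToHandle(importer_fd, prime_fd, &handle_out);
 */
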
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
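
/*
 * A minimal sketch of the matching ->gem_prime_import_sg_table() callback,
 * again with made-up "my_*" driver code: it creates a driver GEM object
 * that wraps the imported scatter/gather table:
 *
 *      static struct drm_gem_object *
 *      my_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
 *                                   struct sg_table *sgt)
 *      {
 *              struct my_gem_object *my_obj;
 *
 *              my_obj = my_gem_object_create(dev, size);
 *              if (IS_ERR(my_obj))
 *                      return ERR_CAST(my_obj);
 *
 *              my_obj->sgt = sgt;
 *              return &my_obj->base;
 *      }
 */
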
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (!ret) {
                ret = 0;
                goto out_put;
        }

        /* never seen this one, need to import */
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_put;
        }

        ret = drm_gem_handle_create(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_object_handle_unreference_unlocked(obj);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/*
 * Export an sg table into an array of pages and addresses. This is
 * currently required by the TTM driver in order to do correct fault
 * handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len, offset;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                offset = sg->offset;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
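
/*
 * For illustration, a driver would typically call drm_prime_gem_destroy()
 * from its free-object path when tearing down an imported object; a
 * hypothetical sketch (the "my_*" names are made up):
 *
 *      static void my_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct my_gem_object *my_obj = to_my_gem_object(obj);
 *
 *              if (obj->import_attach)
 *                      drm_prime_gem_destroy(obj, my_obj->sgt);
 *
 *              drm_gem_object_release(obj);
 *              kfree(my_obj);
 *      }
 */
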
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                struct dma_buf *dma_buf, uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                 struct dma_buf *dma_buf)
{
        mutex_lock(&prime_fpriv->lock);
        drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
        mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);