/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export side the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import side the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * we remove the attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
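
/*
 * Illustrative sketch only (not part of the helpers in this file): a driver's
 * free-object path typically completes the import side of the lifetime above
 * by unmapping its sg table and calling drm_prime_gem_destroy(), which
 * detaches from the dma_buf and drops the reference. The example_bo type and
 * its sgt field are hypothetical:
 *
 *      static void example_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct example_bo *bo = container_of(obj, struct example_bo, base);
 *
 *              if (obj->import_attach)
 *                      drm_prime_gem_destroy(obj, bo->sgt);
 *
 *              drm_gem_object_release(obj);
 *              kfree(bo);
 *      }
 */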

struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle);

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        mutex_lock(&obj->dev->struct_mutex);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR_OR_NULL(sgt))
                dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        mutex_unlock(&obj->dev->struct_mutex);
        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
        sg_free_table(sgt);
        kfree(sgt);
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;

        if (obj->export_dma_buf == dma_buf) {
                /* drop the reference the export fd holds */
                obj->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(obj);
        }
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
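
/*
 * Illustrative sketch of how a driver might wire these helpers into its
 * &struct drm_driver. The example_* callbacks are hypothetical; everything
 * else is the generic helper API described above:
 *
 *      static struct drm_driver example_driver = {
 *              .driver_features           = DRIVER_GEM | DRIVER_PRIME,
 *              .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export          = drm_gem_prime_export,
 *              .gem_prime_import          = drm_gem_prime_import,
 *              .gem_prime_pin             = example_gem_prime_pin,
 *              .gem_prime_get_sg_table    = example_gem_prime_get_sg_table,
 *              .gem_prime_import_sg_table = example_gem_prime_import_sg_table,
 *              .gem_prime_vmap            = example_gem_prime_vmap,
 *              .gem_prime_vunmap          = example_gem_prime_vunmap,
 *      };
 */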

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        if (dev->driver->gem_prime_pin) {
                int ret = dev->driver->gem_prime_pin(obj);
                if (ret)
                        return ERR_PTR(ret);
        }
        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);
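
/*
 * Illustrative sketch only: an exporting driver's @gem_prime_get_sg_table
 * callback will typically build the table from its own pinned page array with
 * drm_prime_pages_to_sg(), defined further down in this file. The example_bo
 * type and its pages field are hypothetical:
 *
 *      static struct sg_table *example_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *      {
 *              struct example_bo *bo = container_of(obj, struct example_bo, base);
 *              int npages = obj->size >> PAGE_SHIFT;
 *
 *              return drm_prime_pages_to_sg(bo->pages, npages);
 *      }
 */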

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                int *prime_fd)
{
        struct drm_gem_object *obj;
        void *buf;
        int ret = 0;
        struct dma_buf *dmabuf;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
                return -ENOENT;

        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                goto out_have_obj;
        }

        if (obj->export_dma_buf) {
                dmabuf = obj->export_dma_buf;
                goto out_have_obj;
        }

        buf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(buf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(buf);
                goto out;
        }
        obj->export_dma_buf = buf;

        /* if we've exported this buffer then cheat and add it to the import
         * list so we get the correct handle back
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       obj->export_dma_buf, handle);
        if (ret)
                goto out;

        *prime_fd = dma_buf_fd(buf, flags);
        mutex_unlock(&file_priv->prime.lock);
        return 0;

out_have_obj:
        get_dma_buf(dmabuf);
        *prime_fd = dma_buf_fd(dmabuf, flags);
out:
        drm_gem_object_unreference_unlocked(obj);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM
                         * increases the refcount on the GEM itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (!ret) {
                ret = 0;
                goto out_put;
        }

        /* never seen this one, need to import */
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_put;
        }

        ret = drm_gem_handle_create(file_priv, obj, handle);
        /* drop the reference from import - the handle holds it now */
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_object_handle_unreference_unlocked(obj);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}
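
/*
 * Illustrative sketch of the userspace side of these two ioctls (not kernel
 * code): export a GEM handle as a PRIME fd on one device and import it as a
 * handle on another. Error handling is omitted; exporter_fd, importer_fd and
 * handle are hypothetical:
 *
 *      struct drm_prime_handle args = { .handle = handle, .flags = DRM_CLOEXEC };
 *
 *      ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *      // args.fd now refers to the shared dma-buf
 *
 *      struct drm_prime_handle import = { .fd = args.fd };
 *
 *      ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *      // import.handle is a GEM handle valid on importer_fd
 */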

/*
 * drm_prime_pages_to_sg
 *
 * this helper creates an sg table object from a set of pages
 * the driver is responsible for mapping the pages into the
 * importer's address space
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg)
                goto out;

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/* export an sg table into an array of pages and addresses
 * this is currently required by the TTM driver in order to do correct fault
 * handling
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len, offset;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                offset = sg->offset;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
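
/*
 * Illustrative sketch only: a TTM-based importer's @gem_prime_import_sg_table
 * callback might use the helper above to populate its page and DMA-address
 * arrays for later fault handling. example_bo_create_from_sg() and the
 * example_bo fields are hypothetical, and cleanup on error is omitted:
 *
 *      static struct drm_gem_object *
 *      example_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
 *                                        struct sg_table *sgt)
 *      {
 *              int npages = size >> PAGE_SHIFT;
 *              struct example_bo *bo = example_bo_create_from_sg(dev, size, sgt);
 *              int ret;
 *
 *              if (IS_ERR(bo))
 *                      return ERR_CAST(bo);
 *
 *              ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *                                                     bo->dma_addrs, npages);
 *              if (ret < 0)
 *                      return ERR_PTR(-EINVAL);
 *
 *              return &bo->base;
 *      }
 */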

/* helper function to clean up a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}

int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                struct dma_buf *dma_buf, uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                 struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        mutex_lock(&prime_fpriv->lock);
        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
        mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);