// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

MODULE_IMPORT_NS(DMA_BUF);

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	/* omap_gem_get_sg() returns an ERR_PTR() on failure, which can be
	 * handed back to the caller directly.
	 */
	return omap_gem_get_sg(obj, dir);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_put_sg(obj, sg);
}

static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/* TODO we would need to pin at least part of the buffer to
		 * get a de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);

	return 0;
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	dma_resv_assert_held(buffer->resv);

	/* Let the DRM core set up the VMA first, then apply the
	 * driver-specific mapping (page protection etc.) on top.
	 */
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};

/**
 * omap_gem_prime_export - export a GEM object as a dma-buf
 * @obj: the GEM object to export
 * @flags: flags for the file descriptor (such as DRM_CLOEXEC)
 *
 * The exported dma-buf shares the GEM object's reservation object, so
 * fences are visible to both the exporter and importers.
 */
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = omap_gem_mmap_size(obj);
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */
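/**
 * omap_gem_prime_import - import a dma-buf as a GEM object
 * @dev: the DRM device
 * @dma_buf: the dma-buf to import
 *
 * A buffer that was exported by this driver for the same device is
 * resolved back to its underlying GEM object and only a GEM reference
 * is taken. Any other dma-buf is attached, mapped for DMA, and wrapped
 * in a new GEM object.
 */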
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
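/*
 * How these entry points get wired up elsewhere in the driver, as a
 * rough sketch (see omap_drv.c and omap_gem.c for the actual hook-up):
 * the import side hangs off &struct drm_driver, and the export side off
 * the GEM object's &struct drm_gem_object_funcs.
 *
 *	static const struct drm_driver omap_drm_driver = {
 *		...
 *		.gem_prime_import = omap_gem_prime_import,
 *	};
 *
 *	static const struct drm_gem_object_funcs omap_gem_object_funcs = {
 *		...
 *		.export = omap_gem_prime_export,
 *	};
 */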