// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

MODULE_IMPORT_NS(DMA_BUF);

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	/* omap_gem_get_sg() returns either a pinned scatterlist or an
	 * ERR_PTR(), both of which can be handed back to the caller as-is.
	 */
	return omap_gem_get_sg(obj, dir);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_put_sg(obj, sg);
}

static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/* TODO we would need to pin at least part of the buffer to
		 * get a de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);

	return 0;
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	/* Set up the common GEM mmap state first, then let the driver apply
	 * its omap-specific VMA setup.
	 */
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = omap_gem_mmap_size(obj);
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
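/*
 * Example (not part of the driver): a minimal sketch of how another device
 * driver would typically consume a buffer exported above, using only core
 * dma-buf APIs. "importer_dev" is a placeholder for the importing device and
 * "buf" for the struct dma_buf returned by omap_gem_prime_export(); error
 * handling is elided for brevity.
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(buf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *
 *	(program the importer's DMA engine from sgt)
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	dma_buf_detach(buf, attach);
 *
 * The map/unmap calls end up in omap_gem_map_dma_buf() and
 * omap_gem_unmap_dma_buf() above.
 */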
/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
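/*
 * Example (not part of the driver): the import path above is reached through
 * DRM PRIME when userspace turns a dma-buf fd into a GEM handle. A hedged
 * userspace sketch, assuming "drm_fd" is an open omapdrm device and
 * "dmabuf_fd" is a dma-buf file descriptor; error handling is elided:
 *
 *	struct drm_prime_handle args = { .fd = dmabuf_fd };
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *
 * On return, args.handle names a GEM object. If the fd was exported by the
 * same omapdrm device, omap_gem_prime_import() just took a reference on the
 * original object; otherwise it attached to the dma-buf and wrapped the
 * resulting scatterlist in a new GEM object via omap_gem_new_dmabuf().
 */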