// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

MODULE_IMPORT_NS(DMA_BUF);

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

/*
 * Map the exported GEM buffer for an attached importer.
 *
 * Builds (and pins) the scatter/gather table via omap_gem_get_sg(), then
 * performs cache maintenance for @dir so the importing device sees coherent
 * data.  Returns the sg_table, or an ERR_PTR on failure.  The table is
 * released by omap_gem_unmap_dma_buf() via omap_gem_put_sg().
 */
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;

	sg = omap_gem_get_sg(obj);
	if (IS_ERR(sg))
		return sg;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
}

/* Undo omap_gem_map_dma_buf(): release the sg_table and unpin the buffer. */
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_put_sg(obj, sg);
}

/*
 * Prepare the buffer for CPU access: pin its backing pages so an importer
 * can safely touch them until end_cpu_access.  Tiled buffers are rejected
 * outright (see the TODO below).  Returns 0 or a negative error code.
 */
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

/*
 * End a CPU access window: drop the page reference taken in
 * begin_cpu_access.  Always succeeds.
 */
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);
	return 0;
}

/*
 * mmap the exported buffer into an importer's address space.
 *
 * Two stages: drm_gem_mmap_obj() performs the generic GEM vma setup for
 * the object's mmap size, then omap_gem_mmap_obj() applies the
 * driver-specific handling (defined in omap_gem.c).
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

/* dma-buf exporter callbacks; .release uses the DRM-provided helper. */
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};

/*
 * Export a GEM object as a dma-buf.  The dma-buf's priv points back at the
 * GEM object, which is how the self-import shortcut in
 * omap_gem_prime_import() recognizes our own buffers.
 */
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = omap_gem_mmap_size(obj);
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

/*
 * Import a dma-buf as a GEM object.
 *
 * If the dma-buf was exported by this very device (ops match and the
 * underlying object belongs to @dev), short-circuit and return the
 * existing GEM object with an extra reference.  Otherwise attach to the
 * foreign dma-buf, map it for DMA, and wrap the resulting sg_table in a
 * new GEM object.  On failure the attachment/mapping are unwound in
 * reverse order.  Returns the GEM object or an ERR_PTR.
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* hold the dma-buf for the lifetime of the imported object */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}