// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

MODULE_IMPORT_NS(DMA_BUF);

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

/*
 * Map the exported GEM object for DMA access by the importing device.
 *
 * Returns the object's scatter/gather table, or an ERR_PTR on failure.
 * NOTE(review): omap_gem_get_sg() presumably pins the buffer's backing
 * pages (implied by the ordering comment below) — confirm in omap_gem.c.
 */
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	sg = omap_gem_get_sg(obj);
	if (IS_ERR(sg))
		return sg;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
}

/*
 * Undo omap_gem_map_dma_buf(): release the scatter/gather table (and
 * whatever reference omap_gem_get_sg() took on the object's pages).
 */
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_sg(obj, sg);
}

/*
 * Prepare the buffer for CPU access.
 *
 * Tiled buffers are rejected outright (see the TODO below); for linear
 * buffers the backing pages are acquired so they remain valid until the
 * matching omap_gem_dmabuf_end_cpu_access() call.
 */
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

/*
 * End a CPU access window: drop the page reference taken by
 * omap_gem_dmabuf_begin_cpu_access().  Always succeeds.
 */
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}

/*
 * mmap() handler for the exported dma-buf: first set up the VMA with the
 * generic DRM GEM helper (sized via omap_gem_mmap_size(), which may differ
 * from the VMA length), then apply the omapdrm-specific VMA setup on top.
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

/* dma-buf callbacks for buffers exported by omapdrm. */
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};

/*
 * omap_gem_prime_export - export a GEM object as a dma-buf
 * @obj: the GEM object to export
 * @flags: dma-buf file flags (e.g. O_CLOEXEC), passed through to the core
 *
 * The exported dma-buf shares the GEM object's reservation object and is
 * sized by omap_gem_mmap_size().  Returns the new dma-buf or an ERR_PTR.
 */
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = omap_gem_mmap_size(obj);
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

/*
 * omap_gem_prime_import - import a dma-buf as a GEM object
 * @dev: the DRM device importing the buffer
 * @dma_buf: the dma-buf to import
 *
 * If the dma-buf was exported by this same device, the original GEM object
 * is returned directly (self-import fast path).  Otherwise the buffer is
 * attached, mapped for DMA, and wrapped in a new GEM object; on failure the
 * mapping/attachment/reference are unwound in reverse order via the goto
 * labels.  Returns the GEM object or an ERR_PTR.
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold a dma-buf reference for the lifetime of the import. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}