/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	/* the buffer is pinned contiguously, so a single scatterlist
	 * entry covers the whole object
	 */
	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out_unpin;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;

out_unpin:
	/* don't leak the pin taken above if the table allocation fails */
	omap_gem_unpin(obj);
out:
	kfree(sg);
	return ERR_PTR(ret);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_unpin(obj);
	sg_free_table(sg);
	kfree(sg);
}
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get a de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);
	return 0;
}

static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync_page(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync_page(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	/* generic GEM vma setup first, then omap-specific setup */
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.map_atomic = omap_gem_dmabuf_kmap_atomic,
	.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.map = omap_gem_dmabuf_kmap,
	.unmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

/* Export an omap GEM object as a dma-buf using the ops defined above. */
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}
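
/*
 * Usage sketch (illustrative only, not part of the driver): userspace
 * normally reaches omap_gem_prime_export() through the generic PRIME
 * ioctl rather than any driver-specific path.  Assuming an open DRM
 * device fd "drm_fd" and a valid GEM handle "gem_handle" (both
 * hypothetical names):
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags = DRM_CLOEXEC,
 *		.fd = -1,
 *	};
 *
 *	ret = ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * On success, args.fd refers to the exported dma-buf.  The DRM core
 * resolves the handle to a GEM object and then calls the driver's
 * .gem_prime_export hook, i.e. the function above.
 */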
/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
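
/*
 * Wiring note (a sketch of how omap_drv.c of this vintage hooks up PRIME
 * support; field names follow the DRM core of the same era): the two
 * helpers above are plugged into struct drm_driver, with the DRM core
 * providing the ioctl plumbing:
 *
 *	static struct drm_driver omap_drm_driver = {
 *		.driver_features    = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export   = omap_gem_prime_export,
 *		.gem_prime_import   = omap_gem_prime_import,
 *		...
 *	};
 */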