1 /* 2 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ 3 * Author: Rob Clark <rob.clark@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published by 7 * the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program. If not, see <http://www.gnu.org/licenses/>. 16 */ 17 18 #include <linux/dma-buf.h> 19 #include <linux/highmem.h> 20 21 #include <drm/drm_prime.h> 22 23 #include "omap_drv.h" 24 25 /* ----------------------------------------------------------------------------- 26 * DMABUF Export 27 */ 28 29 static struct sg_table *omap_gem_map_dma_buf( 30 struct dma_buf_attachment *attachment, 31 enum dma_data_direction dir) 32 { 33 struct drm_gem_object *obj = attachment->dmabuf->priv; 34 struct sg_table *sg; 35 dma_addr_t dma_addr; 36 int ret; 37 38 sg = kzalloc(sizeof(*sg), GFP_KERNEL); 39 if (!sg) 40 return ERR_PTR(-ENOMEM); 41 42 /* camera, etc, need physically contiguous.. but we need a 43 * better way to know this.. 
44 */ 45 ret = omap_gem_pin(obj, &dma_addr); 46 if (ret) 47 goto out; 48 49 ret = sg_alloc_table(sg, 1, GFP_KERNEL); 50 if (ret) 51 goto out; 52 53 sg_init_table(sg->sgl, 1); 54 sg_dma_len(sg->sgl) = obj->size; 55 sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0); 56 sg_dma_address(sg->sgl) = dma_addr; 57 58 /* this must be after omap_gem_pin() to ensure we have pages attached */ 59 omap_gem_dma_sync_buffer(obj, dir); 60 61 return sg; 62 out: 63 kfree(sg); 64 return ERR_PTR(ret); 65 } 66 67 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 68 struct sg_table *sg, enum dma_data_direction dir) 69 { 70 struct drm_gem_object *obj = attachment->dmabuf->priv; 71 omap_gem_unpin(obj); 72 sg_free_table(sg); 73 kfree(sg); 74 } 75 76 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, 77 enum dma_data_direction dir) 78 { 79 struct drm_gem_object *obj = buffer->priv; 80 struct page **pages; 81 if (omap_gem_flags(obj) & OMAP_BO_TILED) { 82 /* TODO we would need to pin at least part of the buffer to 83 * get de-tiled view. For now just reject it. 
84 */ 85 return -ENOMEM; 86 } 87 /* make sure we have the pages: */ 88 return omap_gem_get_pages(obj, &pages, true); 89 } 90 91 static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, 92 enum dma_data_direction dir) 93 { 94 struct drm_gem_object *obj = buffer->priv; 95 omap_gem_put_pages(obj); 96 return 0; 97 } 98 99 static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer, 100 unsigned long page_num) 101 { 102 struct drm_gem_object *obj = buffer->priv; 103 struct page **pages; 104 omap_gem_get_pages(obj, &pages, false); 105 omap_gem_cpu_sync_page(obj, page_num); 106 return kmap(pages[page_num]); 107 } 108 109 static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer, 110 unsigned long page_num, void *addr) 111 { 112 struct drm_gem_object *obj = buffer->priv; 113 struct page **pages; 114 omap_gem_get_pages(obj, &pages, false); 115 kunmap(pages[page_num]); 116 } 117 118 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, 119 struct vm_area_struct *vma) 120 { 121 struct drm_gem_object *obj = buffer->priv; 122 int ret = 0; 123 124 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); 125 if (ret < 0) 126 return ret; 127 128 return omap_gem_mmap_obj(obj, vma); 129 } 130 131 static const struct dma_buf_ops omap_dmabuf_ops = { 132 .map_dma_buf = omap_gem_map_dma_buf, 133 .unmap_dma_buf = omap_gem_unmap_dma_buf, 134 .release = drm_gem_dmabuf_release, 135 .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, 136 .end_cpu_access = omap_gem_dmabuf_end_cpu_access, 137 .map = omap_gem_dmabuf_kmap, 138 .unmap = omap_gem_dmabuf_kunmap, 139 .mmap = omap_gem_dmabuf_mmap, 140 }; 141 142 struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags) 143 { 144 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 145 146 exp_info.ops = &omap_dmabuf_ops; 147 exp_info.size = obj->size; 148 exp_info.flags = flags; 149 exp_info.priv = obj; 150 151 return drm_gem_dmabuf_export(obj->dev, &exp_info); 152 } 153 154 /* 
 * DMABUF Import
 */

/*
 * omap_gem_prime_import - import a dma_buf as a GEM object
 * @dev: DRM device importing the buffer
 * @dma_buf: buffer to import
 *
 * If @dma_buf was exported from this same device, the underlying GEM
 * object is reused (with an extra reference) rather than re-imported.
 * Otherwise the buffer is attached, mapped, and wrapped in a new GEM
 * object via omap_gem_new_dmabuf().
 *
 * Returns the GEM object on success, or an ERR_PTR() on failure.
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/*
	 * Hold a reference on the dma_buf while imported; it is dropped on
	 * the error path below (release on object teardown happens elsewhere
	 * — not visible in this file).
	 */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

	/* unwind in reverse order of acquisition */
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}