// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
	.free = mtk_drm_gem_free_object,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = mtk_drm_gem_prime_vmap,
	.vunmap = mtk_drm_gem_prime_vunmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}

struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer\n",
			  obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}

int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an id in the idr table, where the obj is registered;
	 * the handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}
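/*
 * Illustrative userspace counterpart to the dumb-buffer path above (a
 * sketch using the generic DRM dumb-buffer UAPI, not code from this
 * driver): create the buffer, look up its fake mmap offset, then map it.
 * That offset is the one consumed by mtk_drm_gem_mmap() below.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */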
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so
	 * clear the VM_PFNMAP flag that was set by
	 * drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return mtk_drm_gem_object_mmap(obj, vma);
}

int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	return mtk_drm_gem_object_mmap(obj, vma);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("sg_table is not contiguous\n");
		return ERR_PTR(-EINVAL);
	}

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	/* the buffer is contiguous, so the first entry covers it all */
	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;
}
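/*
 * Illustrative PRIME round trip exercising the export/import helpers above
 * (a sketch using the generic DRM PRIME UAPI, not code from this driver):
 * turn a GEM handle into a dma-buf fd on one device, then import that fd
 * as a handle on another. On import, the DRM core's fd-to-handle path ends
 * up calling mtk_gem_prime_import_sg_table().
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,	/- handle on the exporting device -/
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 *	/- args.fd now refers to a dma-buf; import it on another device -/
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 */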
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt = NULL;
	unsigned int npages;

	if (mtk_gem->kvaddr)
		goto out;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages) {
		kfree(sgt);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);

	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));
	if (!mtk_gem->kvaddr) {
		kfree(sgt);
		kfree(mtk_gem->pages);
		mtk_gem->pages = NULL;
		return -ENOMEM;
	}

out:
	kfree(sgt);
	dma_buf_map_set_vaddr(map, mtk_gem->kvaddr);

	return 0;
}

void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	void *vaddr = map->vaddr;

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = NULL;
	kfree(mtk_gem->pages);
	mtk_gem->pages = NULL;
}
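/*
 * For reference, a minimal sketch of how these helpers are typically wired
 * up in the driver core (mtk_drm_drv.c in this tree; field names assume the
 * same kernel version as this file):
 *
 *	static const struct file_operations mtk_drm_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.mmap = mtk_drm_gem_mmap,
 *		.unlocked_ioctl = drm_ioctl,
 *		...
 *	};
 *
 *	static const struct drm_driver mtk_drm_driver = {
 *		.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
 *		.dumb_create = mtk_drm_gem_dumb_create,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 *		.gem_prime_mmap = mtk_drm_gem_mmap_buf,
 *		...
 *	};
 */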