1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* exynos_drm_gem.c 3 * 4 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 5 * Author: Inki Dae <inki.dae@samsung.com> 6 */ 7 8 9 #include <linux/dma-buf.h> 10 #include <linux/pfn_t.h> 11 #include <linux/shmem_fs.h> 12 #include <linux/module.h> 13 14 #include <drm/drm_prime.h> 15 #include <drm/drm_vma_manager.h> 16 #include <drm/exynos_drm.h> 17 18 #include "exynos_drm_drv.h" 19 #include "exynos_drm_gem.h" 20 21 MODULE_IMPORT_NS(DMA_BUF); 22 23 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) 24 { 25 struct drm_device *dev = exynos_gem->base.dev; 26 unsigned long attr = 0; 27 28 if (exynos_gem->dma_addr) { 29 DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n"); 30 return 0; 31 } 32 33 /* 34 * if EXYNOS_BO_CONTIG, fully physically contiguous memory 35 * region will be allocated else physically contiguous 36 * as possible. 37 */ 38 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) 39 attr |= DMA_ATTR_FORCE_CONTIGUOUS; 40 41 /* 42 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping 43 * else cachable mapping. 44 */ 45 if (exynos_gem->flags & EXYNOS_BO_WC || 46 !(exynos_gem->flags & EXYNOS_BO_CACHABLE)) 47 attr |= DMA_ATTR_WRITE_COMBINE; 48 49 /* FBDev emulation requires kernel mapping */ 50 if (!kvmap) 51 attr |= DMA_ATTR_NO_KERNEL_MAPPING; 52 53 exynos_gem->dma_attrs = attr; 54 exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, 55 &exynos_gem->dma_addr, GFP_KERNEL, 56 exynos_gem->dma_attrs); 57 if (!exynos_gem->cookie) { 58 DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n"); 59 return -ENOMEM; 60 } 61 62 if (kvmap) 63 exynos_gem->kvaddr = exynos_gem->cookie; 64 65 DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n", 66 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 67 return 0; 68 } 69 70 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) 71 { 72 struct drm_device *dev = exynos_gem->base.dev; 73 74 if (!exynos_gem->dma_addr) { 75 DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n"); 76 return; 77 } 78 79 DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n", 80 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 81 82 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 83 (dma_addr_t)exynos_gem->dma_addr, 84 exynos_gem->dma_attrs); 85 } 86 87 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 88 struct drm_file *file_priv, 89 unsigned int *handle) 90 { 91 int ret; 92 93 /* 94 * allocate a id of idr table where the obj is registered 95 * and handle has the id what user can see. 96 */ 97 ret = drm_gem_handle_create(file_priv, obj, handle); 98 if (ret) 99 return ret; 100 101 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle); 102 103 /* drop reference from allocate - handle holds it now. */ 104 drm_gem_object_put(obj); 105 106 return 0; 107 } 108 109 void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem) 110 { 111 struct drm_gem_object *obj = &exynos_gem->base; 112 113 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n", 114 obj->handle_count); 115 116 /* 117 * do not release memory region from exporter. 118 * 119 * the region will be released by exporter 120 * once dmabuf's refcount becomes 0. 121 */ 122 if (obj->import_attach) 123 drm_prime_gem_destroy(obj, exynos_gem->sgt); 124 else 125 exynos_drm_free_buf(exynos_gem); 126 127 /* release file pointer to gem object. */ 128 drm_gem_object_release(obj); 129 130 kfree(exynos_gem); 131 } 132 133 static const struct vm_operations_struct exynos_drm_gem_vm_ops = { 134 .open = drm_gem_vm_open, 135 .close = drm_gem_vm_close, 136 }; 137 138 static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { 139 .free = exynos_drm_gem_free_object, 140 .get_sg_table = exynos_drm_gem_prime_get_sg_table, 141 .vm_ops = &exynos_drm_gem_vm_ops, 142 }; 143 144 static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, 145 unsigned long size) 146 { 147 struct exynos_drm_gem *exynos_gem; 148 struct drm_gem_object *obj; 149 int ret; 150 151 exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL); 152 if (!exynos_gem) 153 return ERR_PTR(-ENOMEM); 154 155 exynos_gem->size = size; 156 obj = &exynos_gem->base; 157 158 obj->funcs = &exynos_drm_gem_object_funcs; 159 160 ret = drm_gem_object_init(dev, obj, size); 161 if (ret < 0) { 162 DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n"); 163 kfree(exynos_gem); 164 return ERR_PTR(ret); 165 } 166 167 ret = drm_gem_create_mmap_offset(obj); 168 if (ret < 0) { 169 drm_gem_object_release(obj); 170 kfree(exynos_gem); 171 return ERR_PTR(ret); 172 } 173 174 DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp); 175 176 return exynos_gem; 177 } 178 179 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev, 180 unsigned int flags, 181 unsigned long size, 182 bool kvmap) 183 { 184 struct exynos_drm_gem *exynos_gem; 185 int ret; 186 187 if (flags & ~(EXYNOS_BO_MASK)) { 188 DRM_DEV_ERROR(dev->dev, 189 "invalid GEM buffer flags: %u\n", flags); 190 return ERR_PTR(-EINVAL); 191 } 192 193 if (!size) { 194 DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size); 195 return ERR_PTR(-EINVAL); 196 } 197 198 size = roundup(size, PAGE_SIZE); 199 200 exynos_gem = exynos_drm_gem_init(dev, size); 201 if (IS_ERR(exynos_gem)) 202 return exynos_gem; 203 204 if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) { 205 /* 206 * when no IOMMU is available, all allocated buffers are 207 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag 208 */ 209 flags &= ~EXYNOS_BO_NONCONTIG; 210 DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n"); 211 } 212 213 /* set memory type and cache attribute from user side. */ 214 exynos_gem->flags = flags; 215 216 ret = exynos_drm_alloc_buf(exynos_gem, kvmap); 217 if (ret < 0) { 218 drm_gem_object_release(&exynos_gem->base); 219 kfree(exynos_gem); 220 return ERR_PTR(ret); 221 } 222 223 return exynos_gem; 224 } 225 226 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 227 struct drm_file *file_priv) 228 { 229 struct drm_exynos_gem_create *args = data; 230 struct exynos_drm_gem *exynos_gem; 231 int ret; 232 233 exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false); 234 if (IS_ERR(exynos_gem)) 235 return PTR_ERR(exynos_gem); 236 237 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv, 238 &args->handle); 239 if (ret) { 240 exynos_drm_gem_destroy(exynos_gem); 241 return ret; 242 } 243 244 return 0; 245 } 246 247 int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data, 248 struct drm_file *file_priv) 249 { 250 struct drm_exynos_gem_map *args = data; 251 252 return drm_gem_dumb_map_offset(file_priv, dev, args->handle, 253 &args->offset); 254 } 255 256 struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp, 257 unsigned int gem_handle) 258 { 259 struct drm_gem_object *obj; 260 261 obj = drm_gem_object_lookup(filp, gem_handle); 262 if (!obj) 263 return NULL; 264 return to_exynos_gem(obj); 265 } 266 267 static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, 268 struct vm_area_struct *vma) 269 { 270 struct drm_device *drm_dev = exynos_gem->base.dev; 271 unsigned long vm_size; 272 int ret; 273 274 vma->vm_flags &= ~VM_PFNMAP; 275 vma->vm_pgoff = 0; 276 277 vm_size = vma->vm_end - vma->vm_start; 278 279 /* check if user-requested size is valid. */ 280 if (vm_size > exynos_gem->size) 281 return -EINVAL; 282 283 ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, 284 exynos_gem->dma_addr, exynos_gem->size, 285 exynos_gem->dma_attrs); 286 if (ret < 0) { 287 DRM_ERROR("failed to mmap.\n"); 288 return ret; 289 } 290 291 return 0; 292 } 293 294 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 295 struct drm_file *file_priv) 296 { 297 struct exynos_drm_gem *exynos_gem; 298 struct drm_exynos_gem_info *args = data; 299 struct drm_gem_object *obj; 300 301 obj = drm_gem_object_lookup(file_priv, args->handle); 302 if (!obj) { 303 DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n"); 304 return -EINVAL; 305 } 306 307 exynos_gem = to_exynos_gem(obj); 308 309 args->flags = exynos_gem->flags; 310 args->size = exynos_gem->size; 311 312 drm_gem_object_put(obj); 313 314 return 0; 315 } 316 317 void exynos_drm_gem_free_object(struct drm_gem_object *obj) 318 { 319 exynos_drm_gem_destroy(to_exynos_gem(obj)); 320 } 321 322 int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 323 struct drm_device *dev, 324 struct drm_mode_create_dumb *args) 325 { 326 struct exynos_drm_gem *exynos_gem; 327 unsigned int flags; 328 int ret; 329 330 /* 331 * allocate memory to be used for framebuffer. 332 * - this callback would be called by user application 333 * with DRM_IOCTL_MODE_CREATE_DUMB command. 334 */ 335 336 args->pitch = args->width * ((args->bpp + 7) / 8); 337 args->size = args->pitch * args->height; 338 339 if (is_drm_iommu_supported(dev)) 340 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; 341 else 342 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; 343 344 exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false); 345 if (IS_ERR(exynos_gem)) { 346 dev_warn(dev->dev, "FB allocation failed.\n"); 347 return PTR_ERR(exynos_gem); 348 } 349 350 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv, 351 &args->handle); 352 if (ret) { 353 exynos_drm_gem_destroy(exynos_gem); 354 return ret; 355 } 356 357 return 0; 358 } 359 360 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, 361 struct vm_area_struct *vma) 362 { 363 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 364 int ret; 365 366 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n", 367 exynos_gem->flags); 368 369 /* non-cachable as default. */ 370 if (exynos_gem->flags & EXYNOS_BO_CACHABLE) 371 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 372 else if (exynos_gem->flags & EXYNOS_BO_WC) 373 vma->vm_page_prot = 374 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 375 else 376 vma->vm_page_prot = 377 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); 378 379 ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma); 380 if (ret) 381 goto err_close_vm; 382 383 return ret; 384 385 err_close_vm: 386 drm_gem_vm_close(vma); 387 388 return ret; 389 } 390 391 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 392 { 393 struct drm_gem_object *obj; 394 int ret; 395 396 /* set vm_area_struct. */ 397 ret = drm_gem_mmap(filp, vma); 398 if (ret < 0) { 399 DRM_ERROR("failed to mmap.\n"); 400 return ret; 401 } 402 403 obj = vma->vm_private_data; 404 405 if (obj->import_attach) 406 return dma_buf_mmap(obj->dma_buf, vma, 0); 407 408 return exynos_drm_gem_mmap_obj(obj, vma); 409 } 410 411 /* low-level interface prime helpers */ 412 struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev, 413 struct dma_buf *dma_buf) 414 { 415 return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev)); 416 } 417 418 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) 419 { 420 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 421 struct drm_device *drm_dev = obj->dev; 422 struct sg_table *sgt; 423 int ret; 424 425 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 426 if (!sgt) 427 return ERR_PTR(-ENOMEM); 428 429 ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie, 430 exynos_gem->dma_addr, exynos_gem->size, 431 exynos_gem->dma_attrs); 432 if (ret) { 433 DRM_ERROR("failed to get sgtable, %d\n", ret); 434 kfree(sgt); 435 return ERR_PTR(ret); 436 } 437 438 return sgt; 439 } 440 441 struct drm_gem_object * 442 exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, 443 struct dma_buf_attachment *attach, 444 struct sg_table *sgt) 445 { 446 struct exynos_drm_gem *exynos_gem; 447 448 /* check if the entries in the sg_table are contiguous */ 449 if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { 450 DRM_ERROR("buffer chunks must be mapped contiguously"); 451 return ERR_PTR(-EINVAL); 452 } 453 454 exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size); 455 if (IS_ERR(exynos_gem)) 456 return ERR_CAST(exynos_gem); 457 458 /* 459 * Buffer has been mapped as contiguous into DMA address space, 460 * but if there is IOMMU, it can be either CONTIG or NONCONTIG. 461 * We assume a simplified logic below: 462 */ 463 if (is_drm_iommu_supported(dev)) 464 exynos_gem->flags |= EXYNOS_BO_NONCONTIG; 465 else 466 exynos_gem->flags |= EXYNOS_BO_CONTIG; 467 468 exynos_gem->dma_addr = sg_dma_address(sgt->sgl); 469 exynos_gem->sgt = sgt; 470 return &exynos_gem->base; 471 } 472 473 int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, 474 struct vm_area_struct *vma) 475 { 476 int ret; 477 478 ret = drm_gem_mmap_obj(obj, obj->size, vma); 479 if (ret < 0) 480 return ret; 481 482 return exynos_drm_gem_mmap_obj(obj, vma); 483 } 484