// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * GEM buffer-object implementation for the Exynos DRM driver: allocation
 * of DMA-backed buffers, handle/ioctl plumbing, mmap support and
 * dma-buf (PRIME) import/export helpers.
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */


#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

/*
 * exynos_drm_alloc_buf - back a GEM object with DMA memory
 * @exynos_gem: object to allocate for; ->size and ->flags must already be set
 * @kvmap: if true, keep a kernel virtual mapping of the buffer in ->kvaddr
 *
 * Translates the object's EXYNOS_BO_* flags into DMA_ATTR_* attributes and
 * allocates the backing storage with dma_alloc_attrs().  Calling this on an
 * object that already has a dma_addr is a silent no-op (returns 0).
 *
 * Return: 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
	 * region will be allocated else physically contiguous
	 * as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
	 * else cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;
	else
		attr |= DMA_ATTR_NON_CONSISTENT;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	/*
	 * ->cookie is whatever dma_alloc_attrs() returns; with
	 * DMA_ATTR_NO_KERNEL_MAPPING it is an opaque token only usable
	 * with the dma_* APIs, otherwise it is the kernel virtual address
	 * of the buffer (see DMA-attributes documentation).
	 */
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	/* Only valid as a kernel address when we did NOT pass
	 * DMA_ATTR_NO_KERNEL_MAPPING above. */
	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr,
			  exynos_gem->size);
	return 0;
}

/*
 * exynos_drm_free_buf - release the DMA backing of a GEM object
 * @exynos_gem: object whose buffer is to be freed
 *
 * Counterpart of exynos_drm_alloc_buf(); frees with the exact size,
 * cookie and attributes recorded at allocation time.  Objects without a
 * dma_addr (never allocated, or imported) are ignored.
 */
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr,
			  exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);
}

/*
 * exynos_drm_gem_handle_create - expose a GEM object to userspace
 * @obj: object to register
 * @file_priv: DRM file the handle belongs to
 * @handle: out-parameter receiving the userspace-visible handle
 *
 * On success the handle owns a reference to @obj and the caller's
 * creation reference is dropped here; the caller must not put @obj again.
 *
 * Return: 0 on success, negative errno from drm_gem_handle_create().
 */
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate a id of idr table where the obj is registered
	 * and handle has the id what user can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n",
			  *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

/*
 * exynos_drm_gem_destroy - final teardown of a GEM object
 * @exynos_gem: object to destroy
 *
 * Called when the last reference is gone (see exynos_drm_gem_free_object).
 * Frees locally-allocated backing storage, or for imported objects lets
 * drm_prime_gem_destroy() unwind the dma-buf attachment instead.
 */
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

/* vm_ops only track mapping lifetime; faulting is not used because the
 * whole buffer is mapped up front via dma_mmap_attrs(). */
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.vmap = exynos_drm_gem_prime_vmap,
	.vunmap = exynos_drm_gem_prime_vunmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

/*
 * exynos_drm_gem_init - allocate and initialize a bare exynos GEM object
 * @dev: owning DRM device
 * @size: object size in bytes (caller has already page-aligned it)
 *
 * Sets up the embedded drm_gem_object and its mmap offset but does NOT
 * allocate backing storage; callers either call exynos_drm_alloc_buf()
 * or attach an imported sg-table afterwards.
 *
 * Return: new object, or ERR_PTR on failure.
 */
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		/* unwind drm_gem_object_init() before freeing */
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

/*
 * exynos_drm_gem_create - create a GEM object with allocated backing
 * @dev: owning DRM device
 * @flags: EXYNOS_BO_* memory type and cache attribute flags
 * @size: requested size in bytes; rounded up to PAGE_SIZE here
 * @kvmap: whether to keep a kernel virtual mapping (FBDev needs one)
 *
 * Return: new, fully backed object or ERR_PTR (-EINVAL for bad flags or
 * zero size, -ENOMEM or other errno from the allocation path).
 */
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n",
			      size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

/*
 * exynos_drm_gem_create_ioctl - DRM_IOCTL_EXYNOS_GEM_CREATE handler
 *
 * Creates a buffer from user-supplied flags/size and returns a handle in
 * args->handle.  On handle-creation failure the object is destroyed here
 * so nothing leaks.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size,
					   false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

/*
 * exynos_drm_gem_map_ioctl - look up the fake mmap offset for a handle
 *
 * Thin wrapper around the generic dumb-map helper; args->offset receives
 * the offset to pass to mmap(2) on the DRM fd.
 */
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

/*
 * exynos_drm_gem_get - resolve a handle to an exynos GEM object
 * @filp: DRM file owning the handle
 * @gem_handle: userspace handle
 *
 * Return: the object with a reference held (caller must drop it), or
 * NULL when the handle is unknown.
 */
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

/*
 * exynos_drm_gem_mmap_buffer - map the DMA buffer into a userspace VMA
 * @exynos_gem: backing object
 * @vma: destination VMA, page protection already set by the caller
 *
 * Uses dma_mmap_attrs() so the mapping matches the attributes the buffer
 * was allocated with.  vm_pgoff is zeroed because the DRM fake offset
 * (used only to locate the object) would otherwise be interpreted as an
 * offset into the buffer.
 *
 * Return: 0 on success, -EINVAL if the VMA is larger than the buffer,
 * or the error from dma_mmap_attrs().
 */
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	/* drm_gem_mmap() sets VM_PFNMAP; dma_mmap_attrs() manages the
	 * mapping itself, so clear it here. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

/*
 * exynos_drm_gem_get_ioctl - DRM_IOCTL_EXYNOS_GEM_GET handler
 *
 * Reports a buffer's flags and (page-aligned) size back to userspace.
 */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	/* drop the lookup reference */
	drm_gem_object_put(obj);

	return 0;
}

/* .free callback of exynos_drm_gem_object_funcs */
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

/*
 * exynos_drm_gem_dumb_create - create a "dumb" buffer for scanout
 *
 * Computes pitch/size from width/height/bpp, picks write-combined memory
 * (non-contiguous when an IOMMU can remap it, contiguous otherwise) and
 * returns a handle in args->handle.
 */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	/* round bpp up to whole bytes per pixel */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

/*
 * exynos_drm_gem_mmap_obj - apply cache attributes and map the buffer
 * @obj: object being mapped
 * @vma: VMA prepared by drm_gem_mmap()/drm_gem_mmap_obj()
 *
 * Chooses page protection from the object's EXYNOS_BO_* flags, then
 * performs the actual mapping.  On failure the vm_ops open reference
 * taken by the generic mmap path is dropped via drm_gem_vm_close().
 */
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/*
 * exynos_drm_gem_mmap - file_operations.mmap entry point
 *
 * Lets the DRM core resolve the fake offset to an object, then either
 * forwards to the dma-buf exporter (for imported buffers) or maps the
 * locally allocated buffer.
 */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	/* imported buffers must be mapped by their exporter */
	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */

/* Import against the DMA device rather than the virtual DRM device, so
 * the sg-table is mapped for the device that actually does the DMA. */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

/*
 * exynos_drm_gem_prime_get_sg_table - export the buffer as an sg-table
 * @obj: object to export
 *
 * Builds a scatter/gather table describing the DMA buffer via
 * dma_get_sgtable_attrs().  Caller owns (and must free) the returned
 * table.
 *
 * Return: new sg_table or ERR_PTR on failure.
 */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt,
				    exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

/*
 * exynos_drm_gem_prime_import_sg_table - wrap an imported dma-buf mapping
 * @dev: importing DRM device
 * @attach: dma-buf attachment the table was mapped through
 * @sgt: mapped scatter/gather table, ownership passed to the new object
 *
 * The hardware needs a single contiguous DMA range, so multi-entry
 * tables are accepted only when their entries are contiguous in DMA
 * address space.
 *
 * Return: new GEM object, or ERR_PTR(-EINVAL) for empty/non-contiguous
 * tables, or the error from exynos_drm_gem_init().
 */
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	if (sgt->nents < 1)
		return ERR_PTR(-EINVAL);

	/*
	 * Check if the provided buffer has been mapped as contiguous
	 * into DMA address space.
	 */
	if (sgt->nents > 1) {
		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			/* a zero dma_len entry appears to mark the end of
			 * the DMA-mapped portion of the table */
			if (!sg_dma_len(s))
				break;
			if (sg_dma_address(s) != next_addr) {
				DRM_ERROR("buffer chunks must be mapped contiguously");
				return ERR_PTR(-EINVAL);
			}
			next_addr = sg_dma_address(s) + sg_dma_len(s);
		}
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * Buffer has been mapped as contiguous into DMA address space,
	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
	 * We assume a simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}

/* Kernel vmap of exported buffers is not supported; exporters/importers
 * must cope with a NULL vaddr. */
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

/*
 * exynos_drm_gem_prime_mmap - mmap callback used by the PRIME helpers
 *
 * Mirrors exynos_drm_gem_mmap() but for mappings established through a
 * dma-buf fd: the core helper validates the VMA, then the common
 * object-mapping path does the real work.
 */
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}