// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the buffer is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping, otherwise a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.vmap = exynos_drm_gem_prime_vmap,
	.vunmap = exynos_drm_gem_prime_vunmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}
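
/*
 * exynos_drm_gem_create - allocate a GEM object together with its backing
 * buffer (descriptive summary of the function below).
 * @dev: DRM device
 * @flags: EXYNOS_BO_* memory type and cache attribute flags requested by
 *	   the caller
 * @size: requested buffer size in bytes; rounded up to PAGE_SIZE
 * @kvmap: if true, keep a kernel virtual mapping of the buffer (used by
 *	   fbdev emulation)
 */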
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}
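
/*
 * Illustrative userspace sketch (not part of this driver) of how the two
 * ioctls above are typically driven from an application. It assumes
 * libdrm's drmIoctl() and the UAPI definitions from drm/exynos_drm.h;
 * error handling is omitted.
 *
 *	struct drm_exynos_gem_create create = {
 *		.size = length,		// rounded up to PAGE_SIZE by the driver
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create);
 *
 *	struct drm_exynos_gem_map map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map);
 *
 *	void *ptr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */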
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is invoked by userspace via the
	 *   DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}
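
/*
 * mmap() entry point for the DRM device node: drm_gem_mmap() looks up the
 * GEM object from the fake mmap offset and prepares the VMA. Buffers
 * imported through PRIME are forwarded to the exporter via dma_buf_mmap();
 * locally allocated buffers are mapped with dma_mmap_attrs() through
 * exynos_drm_gem_mmap_obj().
 */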
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * Buffer has been mapped as contiguous into DMA address space,
	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
	 * We assume a simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}