/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>

#include "gem.h"

/* Convert a host1x_bo handle back to the tegra_bo that embeds it. */
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

/*
 * host1x_bo_ops.put: drop the GEM reference that tegra_bo_get() took.
 * drm_gem_object_unreference() must be called under struct_mutex.
 */
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

/*
 * host1x_bo_ops.pin: buffers here are physically contiguous (allocated
 * via dma_alloc_writecombine() or imported as a single-entry sg_table,
 * see tegra_bo_import()), so there is nothing to pin — just hand back
 * the DMA address. *sgt is left untouched.
 */
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

/* host1x_bo_ops.unpin: tegra_bo_pin() took no resources, nothing to undo. */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

/*
 * host1x_bo_ops.mmap: return the kernel virtual address of the whole
 * buffer (set up by dma_alloc_writecombine() at creation time).
 */
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

/* host1x_bo_ops.munmap: the mapping is permanent, nothing to release. */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

/*
 * host1x_bo_ops.kmap: map a single page. The buffer is already mapped
 * contiguously in the kernel, so this is plain pointer arithmetic.
 */
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

/* host1x_bo_ops.kunmap: counterpart of tegra_bo_kmap(), a no-op. */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

/*
 * host1x_bo_ops.get: take a GEM reference (under struct_mutex, as
 * required by drm_gem_object_reference()) and return the same handle.
 * Balanced by tegra_bo_put().
 */
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

/* host1x buffer-object operations backed by this file's GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

/*
 * Free the backing storage of a locally-allocated buffer. Only valid
 * for objects created by tegra_bo_create() (imported objects are torn
 * down in tegra_bo_free_object() instead).
 */
static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}

/*
 * Allocate a new buffer object of @size bytes (rounded up to a page
 * multiple) from write-combined, physically contiguous DMA memory and
 * initialize its GEM object and mmap offset.
 *
 * @flags accepts DRM_TEGRA_GEM_CREATE_TILED / _BOTTOM_UP, recorded as
 * TEGRA_BO_* in bo->flags for later use by scanout code.
 *
 * Returns the new object (caller owns the initial GEM reference) or an
 * ERR_PTR() on failure; all partially acquired resources are unwound
 * in reverse order via the goto chain.
 */
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	/* __GFP_NOWARN: allocation failure is reported via dev_err below */
	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->flags |= TEGRA_BO_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}

/*
 * Create a buffer object and immediately install a userspace handle
 * for it in @file. On success the handle holds the sole reference (the
 * creation reference is dropped below), so the object dies when the
 * handle is closed.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     unsigned int size,
					     unsigned long flags,
					     unsigned int *handle)
{
	struct tegra_bo *bo;
	int ret;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	ret = drm_gem_handle_create(file, &bo->gem, handle);
	if (ret)
		goto err;

	/* the handle now owns the object; drop the creation reference */
	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;

err:
	tegra_bo_free_object(&bo->gem);
	return ERR_PTR(ret);
}

/*
 * Wrap a foreign dma-buf in a tegra_bo. Only buffers that map to a
 * single sg_table entry (i.e. physically contiguous memory) are
 * accepted, since the rest of this file addresses buffers through a
 * single bo->paddr.
 *
 * On success the object holds a reference on @buf (get_dma_buf()) plus
 * the attachment and mapping, all released in tegra_bo_free_object().
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	/* NOTE(review): mapped DMA_TO_DEVICE only — presumably the engines
	 * only read imported buffers; confirm against users of bo->sgt. */
	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	/* reject non-contiguous buffers, see comment above */
	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may be NULL or an ERR_PTR here, only unmap a real table */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}

/*
 * Final GEM release callback: tear down either the import state
 * (mapping, attachment, dma-buf reference via drm_prime_gem_destroy())
 * or the local DMA allocation, then the common GEM bookkeeping.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_destroy(gem->dev, bo);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}

/*
 * DRM dumb-buffer create ioctl: compute a byte pitch from width/bpp,
 * honour larger caller-supplied pitch/size, and create the buffer with
 * a handle. No TILED/BOTTOM_UP flags for dumb buffers.
 */
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_bo *bo;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

/*
 * DRM dumb-buffer map ioctl: translate a GEM handle into the fake
 * mmap offset userspace passes to mmap() on the DRM device node.
 */
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	/* drop the lookup reference; still under struct_mutex */
	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

/* VMA ops for userspace mappings: GEM refcounts the object per-VMA. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/*
 * fops.mmap for the DRM device: let drm_gem_mmap() resolve the fake
 * offset and set up the VMA, then map the contiguous buffer into
 * userspace with remap_pfn_range(). On failure the VMA's GEM reference
 * taken by drm_gem_mmap() is dropped via drm_gem_vm_close().
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/*
 * dma_buf_ops.map_dma_buf for exported buffers: build a fresh
 * single-entry sg_table describing the contiguous allocation. The
 * table is owned by the importer and freed in
 * tegra_gem_prime_unmap_dma_buf(). Returns NULL on allocation failure.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	/* one entry covering the whole contiguous buffer */
	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}

/* dma_buf_ops.unmap_dma_buf: free the table built by map_dma_buf above. */
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	sg_free_table(sgt);
	kfree(sgt);
}

/* dma_buf_ops.release: drop the GEM reference held by the dma-buf. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

/* CPU access through the dma-buf kmap interface is not supported. */
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

/* Userspace mmap of the dma-buf itself is not supported. */
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

/* dma_buf_ops.vmap: the buffer is permanently mapped, reuse vaddr. */
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

/* dma_buf_ops.vunmap: nothing was acquired by vmap, nothing to undo. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

/* PRIME export: wrap the GEM object in a dma-buf using the ops above. */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags);
}

/*
 * PRIME import: if the dma-buf is one of our own exports on the same
 * device, short-circuit by referencing the underlying GEM object;
 * otherwise do a full import via tegra_bo_import().
 */
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}