/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}
	}
	return ret;
}
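/*
 * Note that vgem_gem_fault() tries the cached page array first and only
 * falls back to shmem_read_mapping_page(), so CPU mmaps keep working
 * whether or not the object's pages are currently pinned (e.g. while the
 * object is exported via PRIME).
 */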
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						     unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_put_unlocked(&obj->base);
	if (ret)
		return ERR_PTR(ret);

	return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set up by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
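/*
 * Illustrative userspace sketch (not part of this driver): creating a
 * dumb buffer and mapping it exercises vgem_gem_dumb_create(),
 * vgem_gem_dumb_map() and vgem_mmap() above. "fd" is assumed to be an
 * open vgem device file descriptor; error handling is omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = {};
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */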
static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.release	= drm_release,
};

static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}

static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
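/*
 * Illustrative userspace sketch (not part of this driver): exporting a
 * vgem handle as a dma-buf file descriptor goes through
 * drm_gem_prime_handle_to_fd(), which pins the backing pages via
 * vgem_prime_pin() above. "fd" and "handle" are assumed to come from a
 * prior dumb-buffer creation; error handling is omitted.
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	// prime.fd is now a dma-buf fd importable by other drivers
 */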
static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_PRIME |
					  DRIVER_RENDER,
	.release			= vgem_release,
	.open				= vgem_open,
	.postclose			= vgem_postclose,
	.gem_free_object_unlocked	= vgem_gem_free_object,
	.gem_vm_ops			= &vgem_gem_vm_ops,
	.ioctls				= vgem_ioctls,
	.num_ioctls			= ARRAY_SIZE(vgem_ioctls),
	.fops				= &vgem_driver_fops,

	.dumb_create			= vgem_gem_dumb_create,
	.dumb_map_offset		= vgem_gem_dumb_map,

	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_pin			= vgem_prime_pin,
	.gem_prime_unpin		= vgem_prime_unpin,
	.gem_prime_import		= vgem_prime_import,
	.gem_prime_export		= drm_gem_prime_export,
	.gem_prime_import_sg_table	= vgem_prime_import_sg_table,
	.gem_prime_get_sg_table		= vgem_prime_get_sg_table,
	.gem_prime_vmap			= vgem_prime_vmap,
	.gem_prime_vunmap		= vgem_prime_vunmap,
	.gem_prime_mmap			= vgem_prime_mmap,

	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};

static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));
	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
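/*
 * Illustrative userspace sketch (not part of this driver): the two
 * driver-private ioctls in vgem_ioctls attach a fence to a buffer and
 * signal it later (see include/uapi/drm/vgem_drm.h). "fd" and "handle"
 * are assumed as in the earlier sketches; error handling is omitted.
 *
 *	struct drm_vgem_fence_attach attach = {
 *		.handle = handle,
 *		.flags = VGEM_FENCE_WRITE,
 *	};
 *	struct drm_vgem_fence_signal signal = {};
 *
 *	ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);
 *	signal.fence = attach.out_fence;
 *	ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
 */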