/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
31 */ 32 33 #include <linux/module.h> 34 #include <linux/ramfs.h> 35 #include <linux/shmem_fs.h> 36 #include <linux/dma-buf.h> 37 #include "vgem_drv.h" 38 39 #define DRIVER_NAME "vgem" 40 #define DRIVER_DESC "Virtual GEM provider" 41 #define DRIVER_DATE "20120112" 42 #define DRIVER_MAJOR 1 43 #define DRIVER_MINOR 0 44 45 static void vgem_gem_free_object(struct drm_gem_object *obj) 46 { 47 struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); 48 49 drm_gem_object_release(obj); 50 kfree(vgem_obj); 51 } 52 53 static int vgem_gem_fault(struct vm_fault *vmf) 54 { 55 struct vm_area_struct *vma = vmf->vma; 56 struct drm_vgem_gem_object *obj = vma->vm_private_data; 57 /* We don't use vmf->pgoff since that has the fake offset */ 58 unsigned long vaddr = vmf->address; 59 struct page *page; 60 61 page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping, 62 (vaddr - vma->vm_start) >> PAGE_SHIFT); 63 if (!IS_ERR(page)) { 64 vmf->page = page; 65 return 0; 66 } else switch (PTR_ERR(page)) { 67 case -ENOSPC: 68 case -ENOMEM: 69 return VM_FAULT_OOM; 70 case -EBUSY: 71 return VM_FAULT_RETRY; 72 case -EFAULT: 73 case -EINVAL: 74 return VM_FAULT_SIGBUS; 75 default: 76 WARN_ON_ONCE(PTR_ERR(page)); 77 return VM_FAULT_SIGBUS; 78 } 79 } 80 81 static const struct vm_operations_struct vgem_gem_vm_ops = { 82 .fault = vgem_gem_fault, 83 .open = drm_gem_vm_open, 84 .close = drm_gem_vm_close, 85 }; 86 87 static int vgem_open(struct drm_device *dev, struct drm_file *file) 88 { 89 struct vgem_file *vfile; 90 int ret; 91 92 vfile = kzalloc(sizeof(*vfile), GFP_KERNEL); 93 if (!vfile) 94 return -ENOMEM; 95 96 file->driver_priv = vfile; 97 98 ret = vgem_fence_open(vfile); 99 if (ret) { 100 kfree(vfile); 101 return ret; 102 } 103 104 return 0; 105 } 106 107 static void vgem_preclose(struct drm_device *dev, struct drm_file *file) 108 { 109 struct vgem_file *vfile = file->driver_priv; 110 111 vgem_fence_close(vfile); 112 kfree(vfile); 113 } 114 115 /* ioctls */ 116 117 static struct 
drm_gem_object *vgem_gem_create(struct drm_device *dev, 118 struct drm_file *file, 119 unsigned int *handle, 120 unsigned long size) 121 { 122 struct drm_vgem_gem_object *obj; 123 int ret; 124 125 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 126 if (!obj) 127 return ERR_PTR(-ENOMEM); 128 129 ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE)); 130 if (ret) 131 goto err_free; 132 133 ret = drm_gem_handle_create(file, &obj->base, handle); 134 drm_gem_object_unreference_unlocked(&obj->base); 135 if (ret) 136 goto err; 137 138 return &obj->base; 139 140 err_free: 141 kfree(obj); 142 err: 143 return ERR_PTR(ret); 144 } 145 146 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 147 struct drm_mode_create_dumb *args) 148 { 149 struct drm_gem_object *gem_object; 150 u64 pitch, size; 151 152 pitch = args->width * DIV_ROUND_UP(args->bpp, 8); 153 size = args->height * pitch; 154 if (size == 0) 155 return -EINVAL; 156 157 gem_object = vgem_gem_create(dev, file, &args->handle, size); 158 if (IS_ERR(gem_object)) 159 return PTR_ERR(gem_object); 160 161 args->size = gem_object->size; 162 args->pitch = pitch; 163 164 DRM_DEBUG_DRIVER("Created object of size %lld\n", size); 165 166 return 0; 167 } 168 169 static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, 170 uint32_t handle, uint64_t *offset) 171 { 172 struct drm_gem_object *obj; 173 int ret; 174 175 obj = drm_gem_object_lookup(file, handle); 176 if (!obj) 177 return -ENOENT; 178 179 if (!obj->filp) { 180 ret = -EINVAL; 181 goto unref; 182 } 183 184 ret = drm_gem_create_mmap_offset(obj); 185 if (ret) 186 goto unref; 187 188 *offset = drm_vma_node_offset_addr(&obj->vma_node); 189 unref: 190 drm_gem_object_unreference_unlocked(obj); 191 192 return ret; 193 } 194 195 static struct drm_ioctl_desc vgem_ioctls[] = { 196 DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 197 DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, 
DRM_AUTH|DRM_RENDER_ALLOW), 198 }; 199 200 static int vgem_mmap(struct file *filp, struct vm_area_struct *vma) 201 { 202 unsigned long flags = vma->vm_flags; 203 int ret; 204 205 ret = drm_gem_mmap(filp, vma); 206 if (ret) 207 return ret; 208 209 /* Keep the WC mmaping set by drm_gem_mmap() but our pages 210 * are ordinary and not special. 211 */ 212 vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP; 213 return 0; 214 } 215 216 static const struct file_operations vgem_driver_fops = { 217 .owner = THIS_MODULE, 218 .open = drm_open, 219 .mmap = vgem_mmap, 220 .poll = drm_poll, 221 .read = drm_read, 222 .unlocked_ioctl = drm_ioctl, 223 .release = drm_release, 224 }; 225 226 static int vgem_prime_pin(struct drm_gem_object *obj) 227 { 228 long n_pages = obj->size >> PAGE_SHIFT; 229 struct page **pages; 230 231 /* Flush the object from the CPU cache so that importers can rely 232 * on coherent indirect access via the exported dma-address. 233 */ 234 pages = drm_gem_get_pages(obj); 235 if (IS_ERR(pages)) 236 return PTR_ERR(pages); 237 238 drm_clflush_pages(pages, n_pages); 239 drm_gem_put_pages(obj, pages, true, false); 240 241 return 0; 242 } 243 244 static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) 245 { 246 struct sg_table *st; 247 struct page **pages; 248 249 pages = drm_gem_get_pages(obj); 250 if (IS_ERR(pages)) 251 return ERR_CAST(pages); 252 253 st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT); 254 drm_gem_put_pages(obj, pages, false, false); 255 256 return st; 257 } 258 259 static void *vgem_prime_vmap(struct drm_gem_object *obj) 260 { 261 long n_pages = obj->size >> PAGE_SHIFT; 262 struct page **pages; 263 void *addr; 264 265 pages = drm_gem_get_pages(obj); 266 if (IS_ERR(pages)) 267 return NULL; 268 269 addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); 270 drm_gem_put_pages(obj, pages, false, false); 271 272 return addr; 273 } 274 275 static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 276 
{ 277 vunmap(vaddr); 278 } 279 280 static int vgem_prime_mmap(struct drm_gem_object *obj, 281 struct vm_area_struct *vma) 282 { 283 int ret; 284 285 if (obj->size < vma->vm_end - vma->vm_start) 286 return -EINVAL; 287 288 if (!obj->filp) 289 return -ENODEV; 290 291 ret = call_mmap(obj->filp, vma); 292 if (ret) 293 return ret; 294 295 fput(vma->vm_file); 296 vma->vm_file = get_file(obj->filp); 297 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 298 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 299 300 return 0; 301 } 302 303 static struct drm_driver vgem_driver = { 304 .driver_features = DRIVER_GEM | DRIVER_PRIME, 305 .open = vgem_open, 306 .preclose = vgem_preclose, 307 .gem_free_object_unlocked = vgem_gem_free_object, 308 .gem_vm_ops = &vgem_gem_vm_ops, 309 .ioctls = vgem_ioctls, 310 .num_ioctls = ARRAY_SIZE(vgem_ioctls), 311 .fops = &vgem_driver_fops, 312 313 .dumb_create = vgem_gem_dumb_create, 314 .dumb_map_offset = vgem_gem_dumb_map, 315 316 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 317 .gem_prime_pin = vgem_prime_pin, 318 .gem_prime_export = drm_gem_prime_export, 319 .gem_prime_get_sg_table = vgem_prime_get_sg_table, 320 .gem_prime_vmap = vgem_prime_vmap, 321 .gem_prime_vunmap = vgem_prime_vunmap, 322 .gem_prime_mmap = vgem_prime_mmap, 323 324 .name = DRIVER_NAME, 325 .desc = DRIVER_DESC, 326 .date = DRIVER_DATE, 327 .major = DRIVER_MAJOR, 328 .minor = DRIVER_MINOR, 329 }; 330 331 static struct drm_device *vgem_device; 332 333 static int __init vgem_init(void) 334 { 335 int ret; 336 337 vgem_device = drm_dev_alloc(&vgem_driver, NULL); 338 if (IS_ERR(vgem_device)) { 339 ret = PTR_ERR(vgem_device); 340 goto out; 341 } 342 343 ret = drm_dev_register(vgem_device, 0); 344 if (ret) 345 goto out_unref; 346 347 return 0; 348 349 out_unref: 350 drm_dev_unref(vgem_device); 351 out: 352 return ret; 353 } 354 355 static void __exit vgem_exit(void) 356 { 357 drm_dev_unregister(vgem_device); 358 drm_dev_unref(vgem_device); 359 } 360 361 
module_init(vgem_init); 362 module_exit(vgem_exit); 363 364 MODULE_AUTHOR("Red Hat, Inc."); 365 MODULE_AUTHOR("Intel Corporation"); 366 MODULE_DESCRIPTION(DRIVER_DESC); 367 MODULE_LICENSE("GPL and additional rights"); 368