// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	/* Build the device mapping lazily on first CPU access, sync after that. */
	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.vmap		   = vmap_udmabuf,
	.vunmap		   = vunmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access	   = end_cpu_udmabuf,
};

/* The memfd must not shrink under us, and must still accept writes. */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			/* hugetlb backing: walk each huge page in PAGE_SIZE steps */
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	/* unwind: drop every page pinned so far */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
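/*
 * Usage sketch (a standalone userspace program, not part of this module):
 * a minimal example of turning a memfd into a dma-buf via UDMABUF_CREATE.
 * It assumes the uapi header <linux/udmabuf.h> is installed, glibc >= 2.27
 * for the memfd_create() wrapper, and a kernel with this driver loaded;
 * error handling is abbreviated. Build it separately, e.g.
 * "cc -o udmabuf-demo udmabuf-demo.c".
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/udmabuf.h>

int main(void)
{
	struct udmabuf_create create;
	size_t size = (size_t)getpagesize() * 16;
	int devfd, memfd, buffd;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0) {
		perror("open /dev/udmabuf");
		return 1;
	}

	/* The backing memory must be a sealable memfd (or hugetlbfs). */
	memfd = memfd_create("udmabuf-demo", MFD_ALLOW_SEALING);
	if (memfd < 0 || ftruncate(memfd, size) < 0) {
		perror("memfd setup");
		return 1;
	}

	/*
	 * udmabuf_create() insists on F_SEAL_SHRINK (so the pages cannot
	 * vanish underneath the exported buffer) and rejects memfds that
	 * carry F_SEAL_WRITE.
	 */
	if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0) {
		perror("F_ADD_SEALS");
		return 1;
	}

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;	/* must be PAGE_SIZE aligned */
	create.size   = size;	/* must be PAGE_SIZE aligned */

	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0) {
		perror("UDMABUF_CREATE");
		return 1;
	}
	printf("created dma-buf fd %d from memfd %d\n", buffd, memfd);

	close(buffd);
	close(memfd);
	close(devfd);
	return 0;
}

/*
 * For buffers assembled from several ranges (or several memfds), the
 * UDMABUF_CREATE_LIST ioctl takes a struct udmabuf_create_list header
 * followed immediately by head.count struct udmabuf_create_item entries,
 * bounded by the list_limit module parameter above.
 */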