// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

/*
 * Per-buffer state: the referenced backing pages, plus an sg_table that
 * is cached for CPU-access synchronization (begin/end_cpu_access).
 */
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

/* CPU fault handler for userspace mmaps: hand out the backing page. */
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

/* Build and DMA-map a scatter-gather table covering all backing pages. */
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

/* dma-buf release: drop the cached mapping and all page references. */
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

/*
 * The first CPU access maps the buffer for the misc device; subsequent
 * accesses only sync the cached mapping.
 */
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

/*
 * The backing memfd must be sealed against shrinking, so pages cannot
 * vanish underneath the dma-buf, and must not be write-sealed, since
 * devices may write to the buffer through the dma-buf.
 */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

/*
 * Validate each (memfd, offset, size) range, take a reference on every
 * backing page, and export the collection as a single dma-buf.
 */
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				/* hugetlb: step through the huge page in PAGE_SIZE units */
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	/* Unwind: drop the page references taken so far. */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

/* UDMABUF_CREATE: single-range convenience wrapper around udmabuf_create(). */
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
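
/*
 * Usage sketch (illustrative only, not part of the driver): a minimal
 * userspace sequence for turning a sealed memfd into a dma-buf fd via
 * UDMABUF_CREATE. The seals mirror SEALS_WANTED/SEALS_DENIED above, and
 * offset/size must be page-aligned. Assumes _GNU_SOURCE plus <fcntl.h>,
 * <unistd.h>, <sys/mman.h>, <sys/ioctl.h> and <linux/udmabuf.h>; error
 * handling is omitted for brevity.
 *
 *	size_t size = 16 * getpagesize();
 *	int memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */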