/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include "gntdev-common.h"
#ifdef CONFIG_XEN_GNTDEV_DMABUF
#include "gntdev-dmabuf.h"
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
	"Maximum number of grants that may be mapped by one mapping request");
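
/*
 * Illustrative only (not part of the original source): assuming the
 * module is built as xen-gntdev, the limit can be set at load time or,
 * since the parameter is mode 0644, adjusted later through sysfs:
 *
 *	modprobe xen-gntdev limit=131072
 *	echo 131072 > /sys/module/xen_gntdev/parameters/limit
 */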

static int use_ptemod;

static int unmap_grant_pages(struct gntdev_grant_map *map,
			     int offset, int pages);

static struct miscdevice gntdev_miscdev;

/* ------------------------------------------------------------------ */

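/*
 * Returns true if @count is not acceptable for a single mapping
 * request: either zero or larger than the module-wide "limit"
 * parameter.  Callers turn a true result into -EINVAL.
 */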
bool gntdev_test_page_count(unsigned int count)
{
	return !count || count > limit;
}

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct gntdev_grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

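/*
 * Free everything associated with a grant map.  Maps backed by a DMA
 * buffer (CONFIG_XEN_GRANT_DMA_ALLOC) give their pages back through
 * gnttab_dma_free_pages(); all others return ballooned pages via
 * gnttab_free_pages().  The op arrays and the map itself are freed
 * unconditionally.
 */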
static void gntdev_free_map(struct gntdev_grant_map *map)
{
	if (map == NULL)
		return;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	if (map->dma_vaddr) {
		struct gnttab_dma_alloc_args args;

		args.dev = map->dma_dev;
		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = map->count;
		args.pages = map->pages;
		args.frames = map->frames;
		args.vaddr = map->dma_vaddr;
		args.dev_bus_addr = map->dma_bus_addr;

		gnttab_dma_free_pages(&args);
	} else
#endif
	if (map->pages)
		gnttab_free_pages(map->count, map->pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	kvfree(map->frames);
#endif
	kvfree(map->pages);
	kvfree(map->grants);
	kvfree(map->map_ops);
	kvfree(map->unmap_ops);
	kvfree(map->kmap_ops);
	kvfree(map->kunmap_ops);
	kfree(map);
}

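/*
 * Allocate a map for @count grants together with the parallel arrays
 * (grants, map/unmap ops and, with use_ptemod, kernel map/unmap ops)
 * consumed by the grant-table batch operations.  Every handle starts
 * out as INVALID_GRANT_HANDLE so partially mapped ranges can be
 * recognized later.
 */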
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags)
{
	struct gntdev_grant_map *add;
	int i;

	add = kzalloc(sizeof(*add), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kvmalloc_array(count, sizeof(add->grants[0]),
					GFP_KERNEL);
	add->map_ops   = kvmalloc_array(count, sizeof(add->map_ops[0]),
					GFP_KERNEL);
	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
					GFP_KERNEL);
	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->pages)
		goto err;
	if (use_ptemod) {
		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
						 GFP_KERNEL);
		add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
						 GFP_KERNEL);
		if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
			goto err;
	}

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	add->dma_flags = dma_flags;

	/*
	 * Check if this mapping is requested to be backed
	 * by a DMA buffer.
	 */
	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
		struct gnttab_dma_alloc_args args;

		add->frames = kvcalloc(count, sizeof(add->frames[0]),
				       GFP_KERNEL);
		if (!add->frames)
			goto err;

		/* Remember the device, so we can free DMA memory. */
		add->dma_dev = priv->dma_dev;

		args.dev = priv->dma_dev;
		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = count;
		args.pages = add->pages;
		args.frames = add->frames;

		if (gnttab_dma_alloc_pages(&args))
			goto err;

		add->dma_vaddr = args.vaddr;
		add->dma_bus_addr = args.dev_bus_addr;
	} else
#endif
	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->grants[i].domid = DOMID_INVALID;
		add->grants[i].ref = INVALID_GRANT_REF;
		add->map_ops[i].handle = INVALID_GRANT_HANDLE;
		add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
		if (use_ptemod) {
			add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
			add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
		}
	}

	add->index = 0;
	add->count = count;
	refcount_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}

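/*
 * Insert @add into the sorted maps list, giving it the first index
 * range big enough to hold it.  E.g. with existing maps at 0+2 and
 * 10+4, a new 3-page map ends up at index 2 since 2 + 3 < 10.
 */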
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
						      int index, int count)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

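/*
 * Drop one reference to @map.  On the final put, send the unmap
 * notification event if one was requested, unmap any grants still
 * mapped (only in the !use_ptemod case; with use_ptemod the VMA close
 * path has already unmapped them), and free the map.
 */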
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
	if (!map)
		return;

	if (!refcount_dec_and_test(&map->users))
		return;

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
	struct gntdev_grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() to fall
	 * back to the slow path.  If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    INVALID_GRANT_HANDLE);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif

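/*
 * Fill in the map/unmap ops and hand them to gnttab_map_refs() as one
 * batch.  Without use_ptemod the grants are mapped at the kernel
 * linear address of each ballooned page; with use_ptemod the user ptes
 * were already prepared by find_grant_ptes() and only the kernel-side
 * kmap_ops are set up here.  Successful handles are copied into the
 * unmap ops so a later unmap only touches what was actually mapped;
 * any per-op failure is folded into a single -EINVAL.
 */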
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, INVALID_GRANT_HANDLE);
		}
	} else {
		/*
		 * Set up the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes().
		 * Note that GNTMAP_device_map isn't needed here: The
		 * dev_bus_addr output field gets consumed only from ->map_ops,
		 * and by not requesting it when mapping we also avoid needing
		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
		 * reference to the page in the hypervisor).
		 */
		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
				     GNTMAP_host_map;

		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
				flags, INVALID_GRANT_HANDLE);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
			map->count);

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status == GNTST_okay)
			map->unmap_ops[i].handle = map->map_ops[i].handle;
		else if (!err)
			err = -EINVAL;

		if (map->flags & GNTMAP_device_map)
			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;

		if (use_ptemod) {
			if (map->kmap_ops[i].status == GNTST_okay)
				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
			else if (!err)
				err = -EINVAL;
		}
	}
	return err;
}

static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			       int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
	}
	return err;
}

static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			     int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
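	/*
	 * E.g. with handles [valid, valid, INVALID, valid] and a request
	 * covering all four pages, two batches are issued: one for pages
	 * 0-1 and one for page 3.
	 */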
	while (pages && !err) {
		while (pages &&
		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset + range].handle ==
			    INVALID_GRANT_HANDLE)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		WARN_ON(map->vma != vma);
		mmu_interval_notifier_remove(&map->notifier);
		map->vma = NULL;
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */

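/*
 * mmu interval notifier callback: part of the VMA backing this map is
 * about to be invalidated (e.g. by munmap), so the overlapping grant
 * pages must be unmapped first, before the hypervisor's reference to
 * the ptes goes stale.
 */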
static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct gntdev_grant_map *map =
		container_of(mn, struct gntdev_grant_map, notifier);
	unsigned long mstart, mend;
	int err;

	if (!mmu_notifier_range_blockable(range))
		return false;

	/*
	 * If the VMA is split or otherwise changed the notifier is not
	 * updated, but we don't want to process VA's outside the modified
	 * VMA. FIXME: It would be much more understandable to just prevent
	 * modifying the VMA in the first place.
	 */
	if (map->vma->vm_start >= range->end ||
	    map->vma->vm_end <= range->start)
		return true;

	mstart = max(range->start, map->vma->vm_start);
	mend = min(range->end, map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			range->start, range->end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);

	return true;
}

static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
	.invalidate = gntdev_invalidate,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	mutex_init(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
	if (IS_ERR(priv->dmabuf_priv)) {
		int ret = PTR_ERR(priv->dmabuf_priv);

		kfree(priv);
		return ret;
	}
#endif

	flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	priv->dma_dev = gntdev_miscdev.this_device;
	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct gntdev_grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next,
				 struct gntdev_grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	mutex_unlock(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif

	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct gntdev_grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
	if (!map)
		return err;

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
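
/*
 * For orientation, a minimal userspace sketch of the map path
 * (illustrative only; error handling omitted, and remote_domid/gref
 * are placeholders):
 *
 *	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *
 *	op.refs[0].domid = remote_domid;
 *	op.refs[0].ref = gref;
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, op.index);
 *
 * The returned op.index is the byte offset to hand to mmap(), which is
 * serviced by gntdev_mmap() below.
 */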

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct gntdev_grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct gntdev_grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	mmap_read_unlock(current->mm);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct gntdev_grant_map *map;
	int rc;
	int out_flags;
	evtchn_port_t out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}

#define GNTDEV_COPY_BATCH 16

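/*
 * Grant-copy segments are chopped into XEN_PAGE_SIZE-sized ops and
 * collected in this batch, which is flushed to the hypervisor via
 * gnttab_batch_copy() whenever it fills up or all segments have been
 * queued.  At most one pinned user page is held per queued op.
 */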
struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
	bool writeable;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
				unsigned long *gfn)
{
	unsigned long addr = (unsigned long)virt;
	struct page *page;
	unsigned long xen_pfn;
	int ret;

	ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
	if (ret < 0)
		return ret;

	batch->pages[batch->nr_pages++] = page;

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
	*gfn = pfn_to_gfn(xen_pfn);

	return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
	unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
	batch->nr_pages = 0;
	batch->writeable = false;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}

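/*
 * Queue the ops for one copy segment.  A segment with a local
 * (virtual) endpoint may span several pages and is split at every
 * XEN_PAGE_SIZE boundary; a foreign (grant) endpoint must fit within
 * one grant page.  The user-visible status starts out as GNTST_okay
 * and is overwritten by the first failing op of the segment.
 */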
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);
			batch->writeable = false;

			ret = gntdev_get_page(batch, virt, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);
			batch->writeable = true;

			ret = gntdev_get_page(batch, virt, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

  out:
	gntdev_put_pages(&batch);
	return ret;
}

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);

	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
#endif

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

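/*
 * mmap() against an index range set up earlier by
 * IOCTL_GNTDEV_MAP_GRANT_REF.  Without use_ptemod the grants are
 * mapped onto ballooned kernel pages which are then inserted into the
 * VMA; with use_ptemod the user ptes themselves are handed to the
 * hypervisor, so the VMA is registered with an mmu interval notifier
 * and marked VM_DONTCOPY.
 */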
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = vma_pages(vma);
	struct gntdev_grant_map *map;
	int err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	refcount_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;
	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	if (use_ptemod) {
		map->vma = vma;
		err = mmu_interval_notifier_insert_locked(
			&map->notifier, vma->vm_mm, vma->vm_start,
			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
		if (err)
			goto out_unlock_put;
	}
	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		/*
		 * gntdev takes the address of the PTE in find_grant_ptes() and
		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
		 * purpose of the notifier is to prevent the hypervisor pointer
		 * to the PTE from going stale.
		 *
		 * Since this vma's mappings can't be touched without the
		 * mmap_lock, and we are holding it now, there is no need for
		 * the notifier_range locking pattern.
		 */
		mmu_interval_read_begin(&map->notifier);

		map->pages_vm_start = vma->vm_start;
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = gntdev_map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		err = vm_map_pages_zero(vma, map->pages, map->count);
		if (err)
			goto out_put_map;
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call
		 * returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod) {
		unmap_grant_pages(map, 0, map->count);
		if (map->vma) {
			mmu_interval_notifier_remove(&map->notifier);
			map->vma = NULL;
		}
	}
	gntdev_put_map(priv, map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */