// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

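/*
 * Illustrative sketch (not part of this driver): how a privileged
 * userspace component might issue IOCTL_PRIVCMD_HYPERCALL.  The device
 * node path, the uapi include path and the error handling below are
 * assumptions for the example only; struct privcmd_hypercall itself
 * comes from the privcmd uapi header.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <xen/privcmd.h>	// uapi header; install path may vary
 *
 *	static long do_hypercall(unsigned long op, unsigned long arg0)
 *	{
 *		struct privcmd_hypercall call = {
 *			.op  = op,
 *			.arg = { arg0, 0, 0, 0, 0 },
 *		};
 *		int fd = open("/dev/xen/privcmd", O_RDWR);
 *		long ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		// Returns the hypercall result, or -1 with errno set
 *		// (EPERM once the fd has been restricted, see below).
 *		ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *		close(fd);
 *		return ret;
 *	}
 */
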
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

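/*
 * Illustrative sketch (not part of this driver): the legacy
 * IOCTL_PRIVCMD_MMAP flow.  Userspace first mmap()s a range of the
 * privcmd device (so the VMA below uses privcmd_vm_ops) and then asks
 * for foreign frames to be mapped into it; the first entry's va must
 * match the start of that VMA.  The domid and frame number are
 * placeholders, not values this driver defines.
 *
 *	void *va = mmap(NULL, npages * page_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va     = (unsigned long)va,
 *		.mfn    = foreign_mfn,		// placeholder frame number
 *		.npages = npages,
 *	};
 *	struct privcmd_mmap cmd = {
 *		.num   = 1,
 *		.dom   = foreign_domid,		// placeholder domid
 *		.entry = &ent,
 *	};
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd) < 0)
 *		perror("IOCTL_PRIVCMD_MMAP");
 */
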
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* Auto-translated dom0 note: if the domU being created is PV, then the
 * gfn is an mfn (a machine address on the bus). If it's auto-translated,
 * then the gfn is a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the top nibble
			 * of the 32-bit gfn (with its known
			 * limitations vis-a-vis 64-bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

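/*
 * Illustrative sketch (not part of this driver): how callers consume
 * the per-frame status written back by the second pass above.  A V1
 * IOCTL_PRIVCMD_MMAPBATCH caller re-reads its gfn array and checks the
 * PRIVCMD_MMAPBATCH_* bits from the uapi header; a V2 caller simply
 * reads plain errno values from its err array.
 *
 *	unsigned int i, paged = 0, failed = 0;
 *
 *	for (i = 0; i < num; i++) {
 *		if ((arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR) ==
 *		    PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *			paged++;	// frame was paged out; retry later
 *		else if (arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR)
 *			failed++;	// mapping failed for this frame
 *	}
 *	// V2 equivalent: err[i] == 0 on success, -ENOENT if paged out,
 *	// or another negative errno on failure.
 */
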
/* Allocate pfns that are then mapped with gfns from the foreign domid.
 * Update the vma with the page info to use later.
 * Returns 0 on success, otherwise -errno.
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

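/*
 * Illustrative sketch (not part of this driver): a minimal
 * IOCTL_PRIVCMD_MMAPBATCH_V2 call.  The address must come from an
 * earlier mmap() of the privcmd device, and on the first call for a
 * VMA it has to cover the whole VMA, as checked above.  NR, the domid
 * and the retry helper are placeholders for the example.
 *
 *	xen_pfn_t gfns[NR];	// foreign frames to map, filled by caller
 *	int errs[NR];		// per-frame errno written back by the kernel
 *	void *addr = mmap(NULL, NR * page_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 batch = {
 *		.num  = NR,
 *		.dom  = foreign_domid,		// placeholder domid
 *		.addr = (unsigned long)addr,
 *		.arr  = gfns,
 *		.err  = errs,
 *	};
 *
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0 &&
 *	    errno == ENOENT)
 *		// some frames were paged out; errs[] says which ones,
 *		// and those entries can be retried on the same VMA
 *		retry_paged_frames(fd, addr, gfns, errs, NR);	// hypothetical
 */
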
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (page_count < 0)
			return page_count;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0) {
		nr_pages = pinned;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

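/*
 * Illustrative sketch (not part of this driver): how a device model
 * such as QEMU might wrap IOCTL_PRIVCMD_DM_OP.  The dm_op payload
 * itself is defined by the Xen public headers and is opaque to this
 * driver; the helper name below is hypothetical.
 *
 *	static int do_dm_op(int fd, domid_t dom, void *op, size_t op_size)
 *	{
 *		struct privcmd_dm_op_buf buf = {
 *			.uptr = op,
 *			.size = op_size,	// <= dm_op_buf_max_size
 *		};
 *		struct privcmd_dm_op cmd = {
 *			.dom   = dom,
 *			.num   = 1,		// <= dm_op_max_nr_bufs
 *			.ubufs = &buf,
 *		};
 *
 *		return ioctl(fd, IOCTL_PRIVCMD_DM_OP, &cmd);
 *	}
 */
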
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

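/*
 * Illustrative sketch (not part of this driver): restricting a privcmd
 * fd so that a deprivileged helper can only target a single domain.
 * The domid is a placeholder.
 *
 *	domid_t dom = 5;	// placeholder domid
 *
 *	// After this succeeds, arbitrary hypercalls on this fd return
 *	// -EPERM and every other ioctl must name domain 5; the
 *	// restriction cannot be switched to a different domid later.
 *	if (ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &dom) < 0)
 *		perror("IOCTL_PRIVCMD_RESTRICT");
 */
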
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata;
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	memset(&xdata, 0, sizeof(xdata));
	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;
	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num;

		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, (int *)pfns,
						 vma->vm_page_prot,
						 domid,
						 vma->vm_private_data);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = pfns[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

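/*
 * Illustrative sketch (not part of this driver): mapping a guest
 * resource with IOCTL_PRIVCMD_MMAP_RESOURCE.  The resource type comes
 * from the Xen public memory.h definitions; the domid, resource id and
 * frame count are placeholders for the example.
 *
 *	void *addr = mmap(NULL, nr_frames * page_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_resource res = {
 *		.dom  = foreign_domid,			// placeholder
 *		.type = XENMEM_resource_ioreq_server,	// e.g. ioreq pages
 *		.id   = ioreq_server_id,		// placeholder
 *		.idx  = 0,				// first frame
 *		.num  = nr_frames,
 *		.addr = (unsigned long)addr,
 *	};
 *
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &res) < 0)
 *		perror("IOCTL_PRIVCMD_MMAP_RESOURCE");
 */
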
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the single-shot mapping
 * on a per-pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);