// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

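/*
 * Pass an arbitrary hypercall from userspace straight to the hypervisor.
 * Refused with -EPERM once the file handle has been restricted to a
 * specific domain via IOCTL_PRIVCMD_RESTRICT.
 */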
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

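/*
 * IOCTL_PRIVCMD_MMAP: map ranges of foreign machine frames described by
 * an array of privcmd_mmap_entry structures into the calling process.
 * Only supported for non-auto-translated (PV) configurations; otherwise
 * userspace must use the MMAPBATCH interfaces instead.
 */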
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

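/*
 * Write one per-frame error back to userspace: V1 encodes it in the top
 * nibble of the gfn in the caller's array, V2 stores it in the separate
 * error array.
 */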
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

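/*
 * IOCTL_PRIVCMD_MMAPBATCH{,_V2}: map an array of foreign gfns into a
 * previously prepared privcmd VMA and report per-frame errors back to
 * userspace; the error encoding depends on the ioctl version.
 */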
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

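/*
 * Pin the user pages backing each dm_op buffer so the hypervisor can
 * safely access them for the duration of the hypercall.  *pinned is
 * updated even on failure so the caller can undo partial progress.
 */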
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

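/*
 * IOCTL_PRIVCMD_DM_OP: issue a device model operation on behalf of
 * userspace.  The buffer descriptors are copied in, validated against
 * the module parameter limits, pinned and then handed to
 * HYPERVISOR_dm_op().
 */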
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

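/*
 * IOCTL_PRIVCMD_RESTRICT: restrict this file handle to a single domain.
 * Once set, the restriction cannot be changed, only re-asserted with
 * the same domid.
 */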
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

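/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: map frames of a Xen memory resource
 * belonging to a domain into the calling process, or just report the
 * size of the resource when addr and num are both zero.
 */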
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

#ifdef CONFIG_XEN_PRIVCMD_IRQFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
static LIST_HEAD(irqfds_list);

struct privcmd_kernel_irqfd {
	struct xen_dm_op_buf xbufs;
	domid_t dom;
	bool error;
	struct eventfd_ctx *eventfd;
	struct work_struct shutdown;
	wait_queue_entry_t wait;
	struct list_head list;
	poll_table pt;
};

static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
	lockdep_assert_held(&irqfds_lock);

	list_del_init(&kirqfd->list);
	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}

static void irqfd_shutdown(struct work_struct *work)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(work, struct privcmd_kernel_irqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
	eventfd_ctx_put(kirqfd->eventfd);
	kfree(kirqfd);
}

static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
	u64 cnt;
	long rc;

	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
	xen_preemptible_hcall_end();

	/* Don't repeat the error message for consecutive failures */
	if (rc && !kirqfd->error) {
		pr_err("Failed to configure irq for guest domain: %d\n",
		       kirqfd->dom);
	}

	kirqfd->error = rc;
}

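/*
 * Wait-queue callback run when the eventfd is signalled: inject the
 * stored dm_op on EPOLLIN and tear the irqfd binding down on EPOLLHUP.
 */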
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(wait, struct privcmd_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN)
		irqfd_inject(kirqfd);

	if (flags & EPOLLHUP) {
		mutex_lock(&irqfds_lock);
		irqfd_deactivate(kirqfd);
		mutex_unlock(&irqfds_lock);
	}

	return 0;
}

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(pt, struct privcmd_kernel_irqfd, pt);

	add_wait_queue_priority(wqh, &kirqfd->wait);
}

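/*
 * Register an irqfd: copy in the dm_op that raises the interrupt, attach
 * to the eventfd's wait queue and keep the binding on irqfds_list until
 * it is deassigned or the eventfd is closed.
 */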
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	__poll_t events;
	struct fd f;
	void *dm_op;
	int ret;

	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
	if (!kirqfd)
		return -ENOMEM;
	dm_op = kirqfd + 1;

	if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
		ret = -EFAULT;
		goto error_kfree;
	}

	kirqfd->xbufs.size = irqfd->size;
	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
	kirqfd->dom = irqfd->dom;
	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

	f = fdget(irqfd->fd);
	if (!f.file) {
		ret = -EBADF;
		goto error_kfree;
	}

	kirqfd->eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(kirqfd->eventfd)) {
		ret = PTR_ERR(kirqfd->eventfd);
		goto error_fd_put;
	}

	/*
	 * Install our own custom wake-up handling so we are notified via a
	 * callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

	mutex_lock(&irqfds_lock);

	list_for_each_entry(tmp, &irqfds_list, list) {
		if (kirqfd->eventfd == tmp->eventfd) {
			ret = -EBUSY;
			mutex_unlock(&irqfds_lock);
			goto error_eventfd;
		}
	}

	list_add_tail(&kirqfd->list, &irqfds_list);
	mutex_unlock(&irqfds_lock);

	/*
	 * Check if there was an event already pending on the eventfd before we
	 * registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &kirqfd->pt);
	if (events & EPOLLIN)
		irqfd_inject(kirqfd);

	/*
	 * Do not drop the file until the kirqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

error_eventfd:
	eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
	fdput(f);

error_kfree:
	kfree(kirqfd);
	return ret;
}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(irqfd->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&irqfds_lock);

	list_for_each_entry(kirqfd, &irqfds_list, list) {
		if (kirqfd->eventfd == eventfd) {
			irqfd_deactivate(kirqfd);
			break;
		}
	}

	mutex_unlock(&irqfds_lock);

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed so
	 * that we guarantee there will not be any more interrupts once this
	 * deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_irqfd irqfd;

	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
		return -EPERM;

	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return privcmd_irqfd_deassign(&irqfd);

	return privcmd_irqfd_assign(&irqfd);
}

static int privcmd_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void privcmd_irqfd_exit(void)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;

	mutex_lock(&irqfds_lock);

	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
		irqfd_deactivate(kirqfd);

	mutex_unlock(&irqfds_lock);

	destroy_workqueue(irqfd_cleanup_wq);
}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
	return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_IRQFD */

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	case IOCTL_PRIVCMD_IRQFD:
		ret = privcmd_ioctl_irqfd(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

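/* Each open of the device gets its own, initially unrestricted, state. */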
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

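/*
 * VMA close hook: for auto-translated domains, unmap the foreign gfns
 * and hand the backing pages back to the unpopulated-pages pool.
 */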
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		goto err_privcmdbuf;
	}

	err = privcmd_irqfd_init();
	if (err != 0) {
		pr_err("irqfd init failed\n");
		goto err_irqfd;
	}

	return 0;

err_irqfd:
	misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
	misc_deregister(&privcmd_dev);
	return err;
}

static void __exit privcmd_exit(void)
{
	privcmd_irqfd_exit();
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);