// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

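/*
 * Worked example (illustrative, not from the original source): gathering
 * 1000 xen_pfn_t entries of 8 bytes each with 4 KiB pages packs
 * 4096 / 8 = 512 entries per page, so gather_array() allocates two pages;
 * the caller releases them with free_page_list().
 */
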
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

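/*
 * Illustrative contrast (not from the original source): for 8-byte
 * xen_pfn_t entries and 4 KiB pages, traverse_pages() invokes fn once per
 * entry, while traverse_pages_block() invokes it once per page with nr up
 * to 512, trading callback granularity for fewer calls.
 */
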
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* Auto-translated dom0 note: if the domU being created is PV, then the gfn
 * is an mfn (addr on bus). If it's auto-translated, then the gfn is a pfn
 * (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the top nibble of the
			 * 32-bit gfn (with its known limitations vis-a-vis
			 * 64-bit callers).
			 */
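			/*
			 * Illustrative note (masks assumed from the privcmd
			 * uapi header, not restated in this file):
			 * PRIVCMD_MMAPBATCH_PAGED_ERROR flags frames that
			 * were paged out, PRIVCMD_MMAPBATCH_MFN_ERROR flags
			 * all other per-frame failures.
			 */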
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

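/*
 * Illustrative userspace usage for the V2 batch (a sketch, not part of
 * this file): mmap() a window on /dev/xen/privcmd, then ask the driver to
 * back it with foreign frames:
 *
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = nr_frames, .dom = domid, .addr = (__u64)addr,
 *		.arr = gfns, .err = errs,
 *	};
 *	rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *
 * Per-frame failures land in errs[]; rc is -ENOENT if any frame was paged
 * out, matching the global_error handling above.
 */
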
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}

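/*
 * Illustrative note (not from the original source): if a buffer spans four
 * pages but pin_user_pages_fast() pins only two, off becomes 2 while i is
 * unchanged, so the next iteration retries the same buffer from its third
 * page; once the remainder is pinned, off resets to 0 and i advances.
 */
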
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

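/*
 * Illustrative userspace usage (a sketch, not part of this file): a device
 * model confines an already-open handle to a single guest before dropping
 * privileges:
 *
 *	domid_t domid = guest_domid;
 *	rc = ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *
 * Afterwards raw hypercalls fail with -EPERM and the remaining ioctls
 * accept only this domid.
 */
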
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

#ifdef CONFIG_XEN_PRIVCMD_IRQFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);

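/*
 * Illustrative note (not from the original source): irqfds_srcu lets
 * irqfd_shutdown() wait, via synchronize_srcu(), for an in-flight
 * privcmd_irqfd_assign() to finish initializing a kirqfd it has already
 * published on irqfds_list, without holding irqfds_lock across vfs_poll().
 */
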
struct privcmd_kernel_irqfd {
	struct xen_dm_op_buf xbufs;
	domid_t dom;
	bool error;
	struct eventfd_ctx *eventfd;
	struct work_struct shutdown;
	wait_queue_entry_t wait;
	struct list_head list;
	poll_table pt;
};

static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
	lockdep_assert_held(&irqfds_lock);

	list_del_init(&kirqfd->list);
	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}

static void irqfd_shutdown(struct work_struct *work)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(work, struct privcmd_kernel_irqfd, shutdown);
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path */
	synchronize_srcu(&irqfds_srcu);

	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
	eventfd_ctx_put(kirqfd->eventfd);
	kfree(kirqfd);
}

static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
	u64 cnt;
	long rc;

	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
	xen_preemptible_hcall_end();

	/* Don't repeat the error message for consecutive failures */
	if (rc && !kirqfd->error) {
		pr_err("Failed to configure irq for guest domain: %d\n",
		       kirqfd->dom);
	}

	kirqfd->error = rc;
}

static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(wait, struct privcmd_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN)
		irqfd_inject(kirqfd);

	if (flags & EPOLLHUP) {
		unsigned long flags;

		spin_lock_irqsave(&irqfds_lock, flags);
		irqfd_deactivate(kirqfd);
		spin_unlock_irqrestore(&irqfds_lock, flags);
	}

	return 0;
}

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(pt, struct privcmd_kernel_irqfd, pt);

	add_wait_queue_priority(wqh, &kirqfd->wait);
}

static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;
	__poll_t events;
	struct fd f;
	void *dm_op;
	int ret, idx;

	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
	if (!kirqfd)
		return -ENOMEM;
	dm_op = kirqfd + 1;

	if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
		ret = -EFAULT;
		goto error_kfree;
	}

	kirqfd->xbufs.size = irqfd->size;
	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
	kirqfd->dom = irqfd->dom;
	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

	f = fdget(irqfd->fd);
	if (!f.file) {
		ret = -EBADF;
		goto error_kfree;
	}

	kirqfd->eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(kirqfd->eventfd)) {
		ret = PTR_ERR(kirqfd->eventfd);
		goto error_fd_put;
	}

	/*
	 * Install our own custom wake-up handling so we are notified via a
	 * callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(tmp, &irqfds_list, list) {
		if (kirqfd->eventfd == tmp->eventfd) {
			ret = -EBUSY;
			spin_unlock_irqrestore(&irqfds_lock, flags);
			goto error_eventfd;
		}
	}

	idx = srcu_read_lock(&irqfds_srcu);
	list_add_tail(&kirqfd->list, &irqfds_list);
	spin_unlock_irqrestore(&irqfds_lock, flags);

	/*
	 * Check if there was an event already pending on the eventfd before we
	 * registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &kirqfd->pt);
	if (events & EPOLLIN)
		irqfd_inject(kirqfd);

	srcu_read_unlock(&irqfds_srcu, idx);

	/*
	 * Do not drop the file until the kirqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

error_eventfd:
	eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
	fdput(f);

error_kfree:
	kfree(kirqfd);
	return ret;
}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd;
	struct eventfd_ctx *eventfd;
	unsigned long flags;

	eventfd = eventfd_ctx_fdget(irqfd->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(kirqfd, &irqfds_list, list) {
		if (kirqfd->eventfd == eventfd) {
			irqfd_deactivate(kirqfd);
			break;
		}
	}

	spin_unlock_irqrestore(&irqfds_lock, flags);

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed so
	 * that we guarantee there will not be any more interrupts once this
	 * deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_irqfd irqfd;

	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
		return -EPERM;

	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return privcmd_irqfd_deassign(&irqfd);

	return privcmd_irqfd_assign(&irqfd);
}

static int privcmd_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void privcmd_irqfd_exit(void)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
		irqfd_deactivate(kirqfd);

	spin_unlock_irqrestore(&irqfds_lock, flags);

	destroy_workqueue(irqfd_cleanup_wq);
}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
	return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_IRQFD */

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	case IOCTL_PRIVCMD_IRQFD:
		ret = privcmd_ioctl_irqfd(file, udata);
		break;

	default:
		break;
	}

	return ret;
}
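
/*
 * Hypothetical userspace sketch (illustration only): a toolstack usually
 * opens the device once, optionally pins the fd to a single domain, and
 * then funnels all further ioctls through it. IOCTL_PRIVCMD_RESTRICT is
 * assumed here to take a domid_t; double-check include/uapi/xen/privcmd.h
 * before relying on this.
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
 *	domid_t dom = 7;
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &dom);
 *	// After this, dm_op/mmap/irqfd requests naming any other domain
 *	// fail with -EPERM (see the per-ioctl domid checks above).
 */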
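/*
 * Each open() of the device gets its own struct privcmd_data, so a
 * restriction set via IOCTL_PRIVCMD_RESTRICT is scoped to that file
 * descriptor and vanishes when privcmd_release() frees the data.
 */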
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

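/*
 * VMA close hook for auto-translated domains: tear down the foreign GFN
 * mappings set up by the mmap ioctls and return the backing pages to the
 * unpopulated-pages pool. If the unmap fails the pages are deliberately
 * leaked (see the pr_crit() below) rather than freed while the hypervisor
 * may still reference them.
 */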
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

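/*
 * privcmd mappings are populated up front by the ioctls above, never on
 * demand, so any page fault reaching this handler means userspace touched
 * a hole (e.g. a frame whose mapping failed) and gets SIGBUS.
 */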
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
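
/*
 * Hypothetical userspace sketch (illustration only): mmap() itself only
 * reserves the VA range and installs privcmd_vm_ops; the foreign frames
 * are mapped into it afterwards with IOCTL_PRIVCMD_MMAPBATCH_V2. Field
 * names of struct privcmd_mmapbatch_v2 follow the UAPI header; treat the
 * details as assumptions to verify there.
 *
 *	void *addr = mmap(NULL, npages * getpagesize(),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num  = npages,		// number of frames to map
 *		.dom  = domid,		// foreign domain owning the frames
 *		.addr = (__u64)addr,	// start of the mmap()ed range
 *		.arr  = gfns,		// array of guest frame numbers
 *		.err  = errs,		// per-frame error codes (V2 only)
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 */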

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

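/*
 * Returns non-zero if any page in [addr, addr + nr_pages) already has a
 * present PTE: apply_to_page_range() stops with the -EBUSY from
 * is_mapped_fn() at the first mapped entry.
 */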
static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		goto err_privcmdbuf;
	}

	err = privcmd_irqfd_init();
	if (err != 0) {
		pr_err("irqfd init failed\n");
		goto err_irqfd;
	}

	return 0;

err_irqfd:
	misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
	misc_deregister(&privcmd_dev);
	return err;
}
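
/*
 * Note the unwind order above: a failure in privcmd_irqfd_init() drops both
 * misc devices again, mirroring the teardown in privcmd_exit() below.
 */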

static void __exit privcmd_exit(void)
{
	privcmd_irqfd_exit();
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);