/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
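/*
 * Illustrative userspace sketch (not part of this driver): a privileged
 * process opens /dev/xen/privcmd and drives it with the ioctls declared
 * in xen/privcmd.h.  The hypercall number and arguments below are only
 * an example; in practice the device is used by the Xen toolstack
 * (e.g. libxc) rather than called directly.
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd  = open("/dev/xen/privcmd", O_RDWR);
 *	int ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */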

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

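/*
 * Forward a hypercall from userspace to the hypervisor: copy in the
 * requested operation and its five arguments and issue it via
 * privcmd_call(), returning the hypercall's own return value.
 */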
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}

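/* Free every page on the list and leave the list head reinitialised. */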
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
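/*
 * For example, assuming 4 KiB pages and the 24-byte 64-bit layout of
 * struct privcmd_mmap_entry: 170 entries are packed into each page,
 * the final 16 bytes of the page are left unused, and no element is
 * ever split across a page boundary.
 */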
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

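/*
 * Cursor state threaded through traverse_pages() by
 * privcmd_ioctl_mmap(): the next expected virtual address, the VMA
 * being populated and the domain whose frames are being mapped.
 */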
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

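/*
 * Map one privcmd_mmap_entry worth of machine frames into the VMA.
 * The chunk must not wrap the address space and must continue exactly
 * where the previous chunk ended, staying within the VMA.
 */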
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

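/*
 * IOCTL_PRIVCMD_MMAP: copy the caller's array of privcmd_mmap_entry
 * structures into kernel pages, look up the VMA starting at the first
 * entry's address, claim it for a single mapping and then map each
 * chunk in turn with mmap_mfn_range().  Only the initial domain may
 * use this.
 */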
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

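/*
 * State shared by the IOCTL_PRIVCMD_MMAPBATCH helpers: the target
 * domain and VMA, the next virtual address to populate, a count of
 * failed frames and, for the error pass, the userspace array being
 * written back.
 */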
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};

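/*
 * Map a single frame into the VMA.  On failure the MFN is tagged with
 * 0xf0000000 in place so that userspace can see which entries failed,
 * and the error count is bumped; the walk itself always continues.
 */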
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}

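/* Copy the (possibly error-tagged) MFN array back out to userspace. */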
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}

static struct vm_operations_struct privcmd_vm_ops;

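/*
 * IOCTL_PRIVCMD_MMAPBATCH: copy in an array of MFNs, check that the
 * request exactly covers a privcmd VMA that has not been mapped
 * before, map each frame with mmap_batch_fn() and, if any frame
 * failed, write the tagged array back so the caller can see which
 * ones.  Only the initial domain may use this.
 */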
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			       &pagelist,
			       mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}

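/* Top-level ioctl dispatcher for /dev/xen/privcmd. */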
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
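/*
 * All mappings are created up front by the ioctls above, so any fault
 * means userspace touched part of the VMA that was never populated;
 * log it and deliver SIGBUS.
 */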
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translate guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

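/*
 * Atomically claim the VMA for mapping by swapping vm_private_data
 * from NULL to a non-NULL token.  This succeeds exactly once per VMA,
 * so only one mmap/mmapbatch ioctl can ever populate it.
 */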
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

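/* Register the misc device (/dev/xen/privcmd) only when running under Xen. */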
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		printk(KERN_ERR "Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);