xref: /openbmc/linux/drivers/xen/privcmd.c (revision fdfd811ddde3678247248ca9a27faa999ca4cd51)
1d8414d3cSBastian Blank /******************************************************************************
2d8414d3cSBastian Blank  * privcmd.c
3d8414d3cSBastian Blank  *
4d8414d3cSBastian Blank  * Interface to privileged domain-0 commands.
5d8414d3cSBastian Blank  *
6d8414d3cSBastian Blank  * Copyright (c) 2002-2004, K A Fraser, B Dragovic
7d8414d3cSBastian Blank  */
8d8414d3cSBastian Blank 
9283c0972SJoe Perches #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
10283c0972SJoe Perches 
11d8414d3cSBastian Blank #include <linux/kernel.h>
12d8414d3cSBastian Blank #include <linux/module.h>
13d8414d3cSBastian Blank #include <linux/sched.h>
14d8414d3cSBastian Blank #include <linux/slab.h>
15d8414d3cSBastian Blank #include <linux/string.h>
16d8414d3cSBastian Blank #include <linux/errno.h>
17d8414d3cSBastian Blank #include <linux/mm.h>
18d8414d3cSBastian Blank #include <linux/mman.h>
19d8414d3cSBastian Blank #include <linux/uaccess.h>
20d8414d3cSBastian Blank #include <linux/swap.h>
21d8414d3cSBastian Blank #include <linux/highmem.h>
22d8414d3cSBastian Blank #include <linux/pagemap.h>
23d8414d3cSBastian Blank #include <linux/seq_file.h>
24d8414d3cSBastian Blank #include <linux/miscdevice.h>
25d8414d3cSBastian Blank 
26d8414d3cSBastian Blank #include <asm/pgalloc.h>
27d8414d3cSBastian Blank #include <asm/pgtable.h>
28d8414d3cSBastian Blank #include <asm/tlb.h>
29d8414d3cSBastian Blank #include <asm/xen/hypervisor.h>
30d8414d3cSBastian Blank #include <asm/xen/hypercall.h>
31d8414d3cSBastian Blank 
32d8414d3cSBastian Blank #include <xen/xen.h>
33d8414d3cSBastian Blank #include <xen/privcmd.h>
34d8414d3cSBastian Blank #include <xen/interface/xen.h>
35d8414d3cSBastian Blank #include <xen/features.h>
36d8414d3cSBastian Blank #include <xen/page.h>
37d8414d3cSBastian Blank #include <xen/xen-ops.h>
38d71f5139SMukesh Rathor #include <xen/balloon.h>
39d8414d3cSBastian Blank 
40d8414d3cSBastian Blank #include "privcmd.h"
41d8414d3cSBastian Blank 
42d8414d3cSBastian Blank MODULE_LICENSE("GPL");
43d8414d3cSBastian Blank 
44d71f5139SMukesh Rathor #define PRIV_VMA_LOCKED ((void *)1)
45d71f5139SMukesh Rathor 
46a5deabe0SAndres Lagar-Cavilla static int privcmd_vma_range_is_mapped(
47a5deabe0SAndres Lagar-Cavilla                struct vm_area_struct *vma,
48a5deabe0SAndres Lagar-Cavilla                unsigned long addr,
49a5deabe0SAndres Lagar-Cavilla                unsigned long nr_pages);
50d8414d3cSBastian Blank 
51d8414d3cSBastian Blank static long privcmd_ioctl_hypercall(void __user *udata)
52d8414d3cSBastian Blank {
53d8414d3cSBastian Blank 	struct privcmd_hypercall hypercall;
54d8414d3cSBastian Blank 	long ret;
55d8414d3cSBastian Blank 
56d8414d3cSBastian Blank 	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
57d8414d3cSBastian Blank 		return -EFAULT;
58d8414d3cSBastian Blank 
59*fdfd811dSDavid Vrabel 	xen_preemptible_hcall_begin();
60d8414d3cSBastian Blank 	ret = privcmd_call(hypercall.op,
61d8414d3cSBastian Blank 			   hypercall.arg[0], hypercall.arg[1],
62d8414d3cSBastian Blank 			   hypercall.arg[2], hypercall.arg[3],
63d8414d3cSBastian Blank 			   hypercall.arg[4]);
64*fdfd811dSDavid Vrabel 	xen_preemptible_hcall_end();
65d8414d3cSBastian Blank 
66d8414d3cSBastian Blank 	return ret;
67d8414d3cSBastian Blank }
68d8414d3cSBastian Blank 
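/*
 * Editor's addition -- a hedged userspace sketch, not part of the driver:
 * IOCTL_PRIVCMD_HYPERCALL simply forwards the five-argument hypercall above.
 * Assuming the uapi struct privcmd_hypercall from <xen/privcmd.h> and the
 * hypercall constants from the Xen public headers are visible to userspace
 * (the device node path may vary by distribution), querying the hypervisor
 * version could look roughly like:
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *	// on success, ver holds the hypercall's return value
 *	// (major << 16 | minor for XENVER_version)
 */
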
69d8414d3cSBastian Blank static void free_page_list(struct list_head *pages)
70d8414d3cSBastian Blank {
71d8414d3cSBastian Blank 	struct page *p, *n;
72d8414d3cSBastian Blank 
73d8414d3cSBastian Blank 	list_for_each_entry_safe(p, n, pages, lru)
74d8414d3cSBastian Blank 		__free_page(p);
75d8414d3cSBastian Blank 
76d8414d3cSBastian Blank 	INIT_LIST_HEAD(pages);
77d8414d3cSBastian Blank }
78d8414d3cSBastian Blank 
79d8414d3cSBastian Blank /*
80d8414d3cSBastian Blank  * Given an array of items in userspace, return a list of pages
81d8414d3cSBastian Blank  * containing the data.  If copying fails, either because of memory
82d8414d3cSBastian Blank  * allocation failure or a problem reading user memory, return an
83d8414d3cSBastian Blank  * error code; it's up to the caller to dispose of any partial list.
84d8414d3cSBastian Blank  */
85d8414d3cSBastian Blank static int gather_array(struct list_head *pagelist,
86d8414d3cSBastian Blank 			unsigned nelem, size_t size,
87ceb90fa0SAndres Lagar-Cavilla 			const void __user *data)
88d8414d3cSBastian Blank {
89d8414d3cSBastian Blank 	unsigned pageidx;
90d8414d3cSBastian Blank 	void *pagedata;
91d8414d3cSBastian Blank 	int ret;
92d8414d3cSBastian Blank 
93d8414d3cSBastian Blank 	if (size > PAGE_SIZE)
94d8414d3cSBastian Blank 		return 0;
95d8414d3cSBastian Blank 
96d8414d3cSBastian Blank 	pageidx = PAGE_SIZE;
97d8414d3cSBastian Blank 	pagedata = NULL;	/* quiet, gcc */
98d8414d3cSBastian Blank 	while (nelem--) {
99d8414d3cSBastian Blank 		if (pageidx > PAGE_SIZE-size) {
100d8414d3cSBastian Blank 			struct page *page = alloc_page(GFP_KERNEL);
101d8414d3cSBastian Blank 
102d8414d3cSBastian Blank 			ret = -ENOMEM;
103d8414d3cSBastian Blank 			if (page == NULL)
104d8414d3cSBastian Blank 				goto fail;
105d8414d3cSBastian Blank 
106d8414d3cSBastian Blank 			pagedata = page_address(page);
107d8414d3cSBastian Blank 
108d8414d3cSBastian Blank 			list_add_tail(&page->lru, pagelist);
109d8414d3cSBastian Blank 			pageidx = 0;
110d8414d3cSBastian Blank 		}
111d8414d3cSBastian Blank 
112d8414d3cSBastian Blank 		ret = -EFAULT;
113d8414d3cSBastian Blank 		if (copy_from_user(pagedata + pageidx, data, size))
114d8414d3cSBastian Blank 			goto fail;
115d8414d3cSBastian Blank 
116d8414d3cSBastian Blank 		data += size;
117d8414d3cSBastian Blank 		pageidx += size;
118d8414d3cSBastian Blank 	}
119d8414d3cSBastian Blank 
120d8414d3cSBastian Blank 	ret = 0;
121d8414d3cSBastian Blank 
122d8414d3cSBastian Blank fail:
123d8414d3cSBastian Blank 	return ret;
124d8414d3cSBastian Blank }
125d8414d3cSBastian Blank 
126d8414d3cSBastian Blank /*
127d8414d3cSBastian Blank  * Call function "fn" on each element of the array fragmented
128d8414d3cSBastian Blank  * over a list of pages.
129d8414d3cSBastian Blank  */
130d8414d3cSBastian Blank static int traverse_pages(unsigned nelem, size_t size,
131d8414d3cSBastian Blank 			  struct list_head *pos,
132d8414d3cSBastian Blank 			  int (*fn)(void *data, void *state),
133d8414d3cSBastian Blank 			  void *state)
134d8414d3cSBastian Blank {
135d8414d3cSBastian Blank 	void *pagedata;
136d8414d3cSBastian Blank 	unsigned pageidx;
137d8414d3cSBastian Blank 	int ret = 0;
138d8414d3cSBastian Blank 
139d8414d3cSBastian Blank 	BUG_ON(size > PAGE_SIZE);
140d8414d3cSBastian Blank 
141d8414d3cSBastian Blank 	pageidx = PAGE_SIZE;
142d8414d3cSBastian Blank 	pagedata = NULL;	/* hush, gcc */
143d8414d3cSBastian Blank 
144d8414d3cSBastian Blank 	while (nelem--) {
145d8414d3cSBastian Blank 		if (pageidx > PAGE_SIZE-size) {
146d8414d3cSBastian Blank 			struct page *page;
147d8414d3cSBastian Blank 			pos = pos->next;
148d8414d3cSBastian Blank 			page = list_entry(pos, struct page, lru);
149d8414d3cSBastian Blank 			pagedata = page_address(page);
150d8414d3cSBastian Blank 			pageidx = 0;
151d8414d3cSBastian Blank 		}
152d8414d3cSBastian Blank 
153d8414d3cSBastian Blank 		ret = (*fn)(pagedata + pageidx, state);
154d8414d3cSBastian Blank 		if (ret)
155d8414d3cSBastian Blank 			break;
156d8414d3cSBastian Blank 		pageidx += size;
157d8414d3cSBastian Blank 	}
158d8414d3cSBastian Blank 
159d8414d3cSBastian Blank 	return ret;
160d8414d3cSBastian Blank }
161d8414d3cSBastian Blank 
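/*
 * Editor's note (illustrative, not from the original source): gather_array()
 * and traverse_pages() share one layout invariant -- an element never
 * straddles a page boundary.  A new page is started whenever fewer than
 * 'size' bytes remain, so each page holds PAGE_SIZE / size whole elements.
 * For example, with 4096-byte pages and sizeof(xen_pfn_t) == 8, each page
 * carries 512 frame numbers, so a 1300-entry MMAPBATCH request is gathered
 * into three pages (512 + 512 + 276).
 */
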
162d8414d3cSBastian Blank struct mmap_mfn_state {
163d8414d3cSBastian Blank 	unsigned long va;
164d8414d3cSBastian Blank 	struct vm_area_struct *vma;
165d8414d3cSBastian Blank 	domid_t domain;
166d8414d3cSBastian Blank };
167d8414d3cSBastian Blank 
168d8414d3cSBastian Blank static int mmap_mfn_range(void *data, void *state)
169d8414d3cSBastian Blank {
170d8414d3cSBastian Blank 	struct privcmd_mmap_entry *msg = data;
171d8414d3cSBastian Blank 	struct mmap_mfn_state *st = state;
172d8414d3cSBastian Blank 	struct vm_area_struct *vma = st->vma;
173d8414d3cSBastian Blank 	int rc;
174d8414d3cSBastian Blank 
175d8414d3cSBastian Blank 	/* Do not allow range to wrap the address space. */
176d8414d3cSBastian Blank 	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
177d8414d3cSBastian Blank 	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
178d8414d3cSBastian Blank 		return -EINVAL;
179d8414d3cSBastian Blank 
180d8414d3cSBastian Blank 	/* Range chunks must be contiguous in va space. */
181d8414d3cSBastian Blank 	if ((msg->va != st->va) ||
182d8414d3cSBastian Blank 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
183d8414d3cSBastian Blank 		return -EINVAL;
184d8414d3cSBastian Blank 
185d8414d3cSBastian Blank 	rc = xen_remap_domain_mfn_range(vma,
186d8414d3cSBastian Blank 					msg->va & PAGE_MASK,
187d8414d3cSBastian Blank 					msg->mfn, msg->npages,
188d8414d3cSBastian Blank 					vma->vm_page_prot,
1899a032e39SIan Campbell 					st->domain, NULL);
190d8414d3cSBastian Blank 	if (rc < 0)
191d8414d3cSBastian Blank 		return rc;
192d8414d3cSBastian Blank 
193d8414d3cSBastian Blank 	st->va += msg->npages << PAGE_SHIFT;
194d8414d3cSBastian Blank 
195d8414d3cSBastian Blank 	return 0;
196d8414d3cSBastian Blank }
197d8414d3cSBastian Blank 
198d8414d3cSBastian Blank static long privcmd_ioctl_mmap(void __user *udata)
199d8414d3cSBastian Blank {
200d8414d3cSBastian Blank 	struct privcmd_mmap mmapcmd;
201d8414d3cSBastian Blank 	struct mm_struct *mm = current->mm;
202d8414d3cSBastian Blank 	struct vm_area_struct *vma;
203d8414d3cSBastian Blank 	int rc;
204d8414d3cSBastian Blank 	LIST_HEAD(pagelist);
205d8414d3cSBastian Blank 	struct mmap_mfn_state state;
206d8414d3cSBastian Blank 
207d71f5139SMukesh Rathor 	/* We only support privcmd_ioctl_mmap_batch for auto-translated guests. */
208d71f5139SMukesh Rathor 	if (xen_feature(XENFEAT_auto_translated_physmap))
209d71f5139SMukesh Rathor 		return -ENOSYS;
210d71f5139SMukesh Rathor 
211d8414d3cSBastian Blank 	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
212d8414d3cSBastian Blank 		return -EFAULT;
213d8414d3cSBastian Blank 
214d8414d3cSBastian Blank 	rc = gather_array(&pagelist,
215d8414d3cSBastian Blank 			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
216d8414d3cSBastian Blank 			  mmapcmd.entry);
217d8414d3cSBastian Blank 
218d8414d3cSBastian Blank 	if (rc || list_empty(&pagelist))
219d8414d3cSBastian Blank 		goto out;
220d8414d3cSBastian Blank 
221d8414d3cSBastian Blank 	down_write(&mm->mmap_sem);
222d8414d3cSBastian Blank 
223d8414d3cSBastian Blank 	{
224d8414d3cSBastian Blank 		struct page *page = list_first_entry(&pagelist,
225d8414d3cSBastian Blank 						     struct page, lru);
226d8414d3cSBastian Blank 		struct privcmd_mmap_entry *msg = page_address(page);
227d8414d3cSBastian Blank 
228d8414d3cSBastian Blank 		vma = find_vma(mm, msg->va);
229d8414d3cSBastian Blank 		rc = -EINVAL;
230d8414d3cSBastian Blank 
231a5deabe0SAndres Lagar-Cavilla 		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
232d8414d3cSBastian Blank 			goto out_up;
233a5deabe0SAndres Lagar-Cavilla 		vma->vm_private_data = PRIV_VMA_LOCKED;
234d8414d3cSBastian Blank 	}
235d8414d3cSBastian Blank 
236d8414d3cSBastian Blank 	state.va = vma->vm_start;
237d8414d3cSBastian Blank 	state.vma = vma;
238d8414d3cSBastian Blank 	state.domain = mmapcmd.dom;
239d8414d3cSBastian Blank 
240d8414d3cSBastian Blank 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
241d8414d3cSBastian Blank 			    &pagelist,
242d8414d3cSBastian Blank 			    mmap_mfn_range, &state);
243d8414d3cSBastian Blank 
244d8414d3cSBastian Blank 
245d8414d3cSBastian Blank out_up:
246d8414d3cSBastian Blank 	up_write(&mm->mmap_sem);
247d8414d3cSBastian Blank 
248d8414d3cSBastian Blank out:
249d8414d3cSBastian Blank 	free_page_list(&pagelist);
250d8414d3cSBastian Blank 
251d8414d3cSBastian Blank 	return rc;
252d8414d3cSBastian Blank }
253d8414d3cSBastian Blank 
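/*
 * Editor's addition -- a hedged userspace sketch, not part of the driver:
 * IOCTL_PRIVCMD_MMAP expects the caller to have already mmap()ed this file
 * to reserve a VMA, and the first entry's va must equal the VMA start
 * (checked above).  Assuming the uapi structures from <xen/privcmd.h>, with
 * fd, domid, first_mfn and npages as caller-supplied placeholders, a minimal
 * single-range request might look like:
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	void *va = mmap(NULL, npages * pg, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va = (__u64)(unsigned long)va,
 *		.mfn = first_mfn,
 *		.npages = npages,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */
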
254d8414d3cSBastian Blank struct mmap_batch_state {
255d8414d3cSBastian Blank 	domid_t domain;
256d8414d3cSBastian Blank 	unsigned long va;
257d8414d3cSBastian Blank 	struct vm_area_struct *vma;
258d71f5139SMukesh Rathor 	int index;
259ceb90fa0SAndres Lagar-Cavilla 	/* A tristate:
260ceb90fa0SAndres Lagar-Cavilla 	 *      0 for no errors
261ceb90fa0SAndres Lagar-Cavilla 	 *      1 if at least one error has happened (and no
262ceb90fa0SAndres Lagar-Cavilla 	 *          -ENOENT errors have happened)
263ceb90fa0SAndres Lagar-Cavilla 	 *      -ENOENT if at least 1 -ENOENT has happened.
264ceb90fa0SAndres Lagar-Cavilla 	 */
265ceb90fa0SAndres Lagar-Cavilla 	int global_error;
26699beae6cSAndres Lagar-Cavilla 	int version;
267d8414d3cSBastian Blank 
268ceb90fa0SAndres Lagar-Cavilla 	/* User-space mfn array to store errors in the second pass for V1. */
269ceb90fa0SAndres Lagar-Cavilla 	xen_pfn_t __user *user_mfn;
27099beae6cSAndres Lagar-Cavilla 	/* User-space int array to store errors in the second pass for V2. */
27199beae6cSAndres Lagar-Cavilla 	int __user *user_err;
272d8414d3cSBastian Blank };
273d8414d3cSBastian Blank 
274d71f5139SMukesh Rathor /* Auto-translated dom0 note: for a PV domU being created, mfn is a real
275d71f5139SMukesh Rathor  * machine frame (a bus address); for an auto-translated domU it is a pfn
276d71f5139SMukesh Rathor  * (the input to HAP). */
277d8414d3cSBastian Blank static int mmap_batch_fn(void *data, void *state)
278d8414d3cSBastian Blank {
279d8414d3cSBastian Blank 	xen_pfn_t *mfnp = data;
280d8414d3cSBastian Blank 	struct mmap_batch_state *st = state;
281d71f5139SMukesh Rathor 	struct vm_area_struct *vma = st->vma;
282d71f5139SMukesh Rathor 	struct page **pages = vma->vm_private_data;
283d71f5139SMukesh Rathor 	struct page *cur_page = NULL;
284ceb90fa0SAndres Lagar-Cavilla 	int ret;
285d8414d3cSBastian Blank 
286d71f5139SMukesh Rathor 	if (xen_feature(XENFEAT_auto_translated_physmap))
287d71f5139SMukesh Rathor 		cur_page = pages[st->index++];
288d71f5139SMukesh Rathor 
289ceb90fa0SAndres Lagar-Cavilla 	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
2909a032e39SIan Campbell 					 st->vma->vm_page_prot, st->domain,
291d71f5139SMukesh Rathor 					 &cur_page);
292ceb90fa0SAndres Lagar-Cavilla 
293ceb90fa0SAndres Lagar-Cavilla 	/* Store error code for second pass. */
29499beae6cSAndres Lagar-Cavilla 	if (st->version == 1) {
29599beae6cSAndres Lagar-Cavilla 		if (ret < 0) {
29699beae6cSAndres Lagar-Cavilla 			/*
29799beae6cSAndres Lagar-Cavilla 			 * V1 encodes the error codes in the top nibble of the 32-bit
29899beae6cSAndres Lagar-Cavilla 			 * mfn (with its known limitations vis-a-vis 64-bit callers).
29999beae6cSAndres Lagar-Cavilla 			 */
30099beae6cSAndres Lagar-Cavilla 			*mfnp |= (ret == -ENOENT) ?
30199beae6cSAndres Lagar-Cavilla 						PRIVCMD_MMAPBATCH_PAGED_ERROR :
30299beae6cSAndres Lagar-Cavilla 						PRIVCMD_MMAPBATCH_MFN_ERROR;
30399beae6cSAndres Lagar-Cavilla 		}
30499beae6cSAndres Lagar-Cavilla 	} else { /* st->version == 2 */
30599beae6cSAndres Lagar-Cavilla 		*((int *) mfnp) = ret;
30699beae6cSAndres Lagar-Cavilla 	}
307ceb90fa0SAndres Lagar-Cavilla 
308ceb90fa0SAndres Lagar-Cavilla 	/* And see if it affects the global_error. */
309ceb90fa0SAndres Lagar-Cavilla 	if (ret < 0) {
310ceb90fa0SAndres Lagar-Cavilla 		if (ret == -ENOENT)
311ceb90fa0SAndres Lagar-Cavilla 			st->global_error = -ENOENT;
312ceb90fa0SAndres Lagar-Cavilla 		else {
313ceb90fa0SAndres Lagar-Cavilla 			/* Record that at least one error has happened. */
314ceb90fa0SAndres Lagar-Cavilla 			if (st->global_error == 0)
315ceb90fa0SAndres Lagar-Cavilla 				st->global_error = 1;
316ceb90fa0SAndres Lagar-Cavilla 		}
317d8414d3cSBastian Blank 	}
318d8414d3cSBastian Blank 	st->va += PAGE_SIZE;
319d8414d3cSBastian Blank 
320d8414d3cSBastian Blank 	return 0;
321d8414d3cSBastian Blank }
322d8414d3cSBastian Blank 
32399beae6cSAndres Lagar-Cavilla static int mmap_return_errors(void *data, void *state)
324d8414d3cSBastian Blank {
325d8414d3cSBastian Blank 	struct mmap_batch_state *st = state;
326d8414d3cSBastian Blank 
32799beae6cSAndres Lagar-Cavilla 	if (st->version == 1) {
32899beae6cSAndres Lagar-Cavilla 		xen_pfn_t mfnp = *((xen_pfn_t *) data);
32999beae6cSAndres Lagar-Cavilla 		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
33099beae6cSAndres Lagar-Cavilla 			return __put_user(mfnp, st->user_mfn++);
33199beae6cSAndres Lagar-Cavilla 		else
33299beae6cSAndres Lagar-Cavilla 			st->user_mfn++;
33399beae6cSAndres Lagar-Cavilla 	} else { /* st->version == 2 */
33499beae6cSAndres Lagar-Cavilla 		int err = *((int *) data);
33599beae6cSAndres Lagar-Cavilla 		if (err)
33699beae6cSAndres Lagar-Cavilla 			return __put_user(err, st->user_err++);
33799beae6cSAndres Lagar-Cavilla 		else
33899beae6cSAndres Lagar-Cavilla 			st->user_err++;
33999beae6cSAndres Lagar-Cavilla 	}
34099beae6cSAndres Lagar-Cavilla 
34199beae6cSAndres Lagar-Cavilla 	return 0;
342d8414d3cSBastian Blank }
343d8414d3cSBastian Blank 
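/*
 * Editor's note (illustrative, not from the original source): after a V1
 * IOCTL_PRIVCMD_MMAPBATCH the caller scans the same frame array it passed
 * in, using the uapi PRIVCMD_MMAPBATCH_* masks from <xen/privcmd.h>;
 * retry_frame() and report_failure() below are hypothetical helpers.
 *
 *	for (i = 0; i < m.num; i++) {
 *		xen_pfn_t e = arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR;
 *
 *		if (e == PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *			retry_frame(i);		// frame was paged out
 *		else if (e)
 *			report_failure(i);	// frame failed to map
 *	}
 *
 * A V2 caller instead checks m.err[i] for 0, -ENOENT (paged out) or another
 * negative errno value.
 */
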
344d71f5139SMukesh Rathor /* Allocate pfns that are then mapped with gmfns from the foreign domid.
345d71f5139SMukesh Rathor  * Update the vma with the page info to use later.
346d71f5139SMukesh Rathor  * Returns: 0 on success, otherwise -errno
347d71f5139SMukesh Rathor  */
348d71f5139SMukesh Rathor static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
349d71f5139SMukesh Rathor {
350d71f5139SMukesh Rathor 	int rc;
351d71f5139SMukesh Rathor 	struct page **pages;
352d71f5139SMukesh Rathor 
353d71f5139SMukesh Rathor 	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
354d71f5139SMukesh Rathor 	if (pages == NULL)
355d71f5139SMukesh Rathor 		return -ENOMEM;
356d71f5139SMukesh Rathor 
357d71f5139SMukesh Rathor 	rc = alloc_xenballooned_pages(numpgs, pages, 0);
358d71f5139SMukesh Rathor 	if (rc != 0) {
359d71f5139SMukesh Rathor 		pr_warn("%s: Could not alloc %d pfns rc:%d\n", __func__,
360d71f5139SMukesh Rathor 			numpgs, rc);
361d71f5139SMukesh Rathor 		kfree(pages);
362d71f5139SMukesh Rathor 		return -ENOMEM;
363d71f5139SMukesh Rathor 	}
364a5deabe0SAndres Lagar-Cavilla 	BUG_ON(vma->vm_private_data != NULL);
365d71f5139SMukesh Rathor 	vma->vm_private_data = pages;
366d71f5139SMukesh Rathor 
367d71f5139SMukesh Rathor 	return 0;
368d71f5139SMukesh Rathor }
369d71f5139SMukesh Rathor 
370d8414d3cSBastian Blank static struct vm_operations_struct privcmd_vm_ops;
371d8414d3cSBastian Blank 
372ceb90fa0SAndres Lagar-Cavilla static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
373d8414d3cSBastian Blank {
374d8414d3cSBastian Blank 	int ret;
375ceb90fa0SAndres Lagar-Cavilla 	struct privcmd_mmapbatch_v2 m;
376d8414d3cSBastian Blank 	struct mm_struct *mm = current->mm;
377d8414d3cSBastian Blank 	struct vm_area_struct *vma;
378d8414d3cSBastian Blank 	unsigned long nr_pages;
379d8414d3cSBastian Blank 	LIST_HEAD(pagelist);
380d8414d3cSBastian Blank 	struct mmap_batch_state state;
381d8414d3cSBastian Blank 
382ceb90fa0SAndres Lagar-Cavilla 	switch (version) {
383ceb90fa0SAndres Lagar-Cavilla 	case 1:
384ceb90fa0SAndres Lagar-Cavilla 		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
385d8414d3cSBastian Blank 			return -EFAULT;
386ceb90fa0SAndres Lagar-Cavilla 		/* Returns per-frame error in m.arr. */
387ceb90fa0SAndres Lagar-Cavilla 		m.err = NULL;
388ceb90fa0SAndres Lagar-Cavilla 		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
389ceb90fa0SAndres Lagar-Cavilla 			return -EFAULT;
390ceb90fa0SAndres Lagar-Cavilla 		break;
391ceb90fa0SAndres Lagar-Cavilla 	case 2:
392ceb90fa0SAndres Lagar-Cavilla 		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
393ceb90fa0SAndres Lagar-Cavilla 			return -EFAULT;
394ceb90fa0SAndres Lagar-Cavilla 		/* Returns per-frame error code in m.err. */
395ceb90fa0SAndres Lagar-Cavilla 		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
396ceb90fa0SAndres Lagar-Cavilla 			return -EFAULT;
397ceb90fa0SAndres Lagar-Cavilla 		break;
398ceb90fa0SAndres Lagar-Cavilla 	default:
399ceb90fa0SAndres Lagar-Cavilla 		return -EINVAL;
400ceb90fa0SAndres Lagar-Cavilla 	}
401d8414d3cSBastian Blank 
402d8414d3cSBastian Blank 	nr_pages = m.num;
403d8414d3cSBastian Blank 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
404d8414d3cSBastian Blank 		return -EINVAL;
405d8414d3cSBastian Blank 
406ceb90fa0SAndres Lagar-Cavilla 	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
407d8414d3cSBastian Blank 
408ceb90fa0SAndres Lagar-Cavilla 	if (ret)
409d8414d3cSBastian Blank 		goto out;
410ceb90fa0SAndres Lagar-Cavilla 	if (list_empty(&pagelist)) {
411ceb90fa0SAndres Lagar-Cavilla 		ret = -EINVAL;
412ceb90fa0SAndres Lagar-Cavilla 		goto out;
413ceb90fa0SAndres Lagar-Cavilla 	}
414ceb90fa0SAndres Lagar-Cavilla 
41599beae6cSAndres Lagar-Cavilla 	if (version == 2) {
41699beae6cSAndres Lagar-Cavilla 		/* Zero error array now to only copy back actual errors. */
41799beae6cSAndres Lagar-Cavilla 		if (clear_user(m.err, sizeof(int) * m.num)) {
41899beae6cSAndres Lagar-Cavilla 			ret = -EFAULT;
419ceb90fa0SAndres Lagar-Cavilla 			goto out;
420ceb90fa0SAndres Lagar-Cavilla 		}
42199beae6cSAndres Lagar-Cavilla 	}
422d8414d3cSBastian Blank 
423d8414d3cSBastian Blank 	down_write(&mm->mmap_sem);
424d8414d3cSBastian Blank 
425d8414d3cSBastian Blank 	vma = find_vma(mm, m.addr);
426d8414d3cSBastian Blank 	if (!vma ||
427a5deabe0SAndres Lagar-Cavilla 	    vma->vm_ops != &privcmd_vm_ops) {
42868fa965dSMats Petersson 		ret = -EINVAL;
429a5deabe0SAndres Lagar-Cavilla 		goto out_unlock;
430a5deabe0SAndres Lagar-Cavilla 	}
431a5deabe0SAndres Lagar-Cavilla 
432a5deabe0SAndres Lagar-Cavilla 	/*
433a5deabe0SAndres Lagar-Cavilla 	 * Caller must either:
434a5deabe0SAndres Lagar-Cavilla 	 *
435a5deabe0SAndres Lagar-Cavilla 	 * Map the whole VMA range, which will also allocate all the
436a5deabe0SAndres Lagar-Cavilla 	 * pages required for the auto_translated_physmap case.
437a5deabe0SAndres Lagar-Cavilla 	 *
438a5deabe0SAndres Lagar-Cavilla 	 * Or
439a5deabe0SAndres Lagar-Cavilla 	 *
440a5deabe0SAndres Lagar-Cavilla 	 * Map unmapped holes left from a previous map attempt (e.g.,
441a5deabe0SAndres Lagar-Cavilla 	 * because those foreign frames were previously paged out).
442a5deabe0SAndres Lagar-Cavilla 	 */
443a5deabe0SAndres Lagar-Cavilla 	if (vma->vm_private_data == NULL) {
444a5deabe0SAndres Lagar-Cavilla 		if (m.addr != vma->vm_start ||
445a5deabe0SAndres Lagar-Cavilla 		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
446a5deabe0SAndres Lagar-Cavilla 			ret = -EINVAL;
447a5deabe0SAndres Lagar-Cavilla 			goto out_unlock;
448d8414d3cSBastian Blank 		}
449d71f5139SMukesh Rathor 		if (xen_feature(XENFEAT_auto_translated_physmap)) {
450d71f5139SMukesh Rathor 			ret = alloc_empty_pages(vma, m.num);
451a5deabe0SAndres Lagar-Cavilla 			if (ret < 0)
452a5deabe0SAndres Lagar-Cavilla 				goto out_unlock;
453a5deabe0SAndres Lagar-Cavilla 		} else
454a5deabe0SAndres Lagar-Cavilla 			vma->vm_private_data = PRIV_VMA_LOCKED;
455a5deabe0SAndres Lagar-Cavilla 	} else {
456a5deabe0SAndres Lagar-Cavilla 		if (m.addr < vma->vm_start ||
457a5deabe0SAndres Lagar-Cavilla 		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
458a5deabe0SAndres Lagar-Cavilla 			ret = -EINVAL;
459a5deabe0SAndres Lagar-Cavilla 			goto out_unlock;
460a5deabe0SAndres Lagar-Cavilla 		}
461a5deabe0SAndres Lagar-Cavilla 		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
462a5deabe0SAndres Lagar-Cavilla 			ret = -EINVAL;
463a5deabe0SAndres Lagar-Cavilla 			goto out_unlock;
464d71f5139SMukesh Rathor 		}
465d71f5139SMukesh Rathor 	}
466d8414d3cSBastian Blank 
467d8414d3cSBastian Blank 	state.domain        = m.dom;
468d8414d3cSBastian Blank 	state.vma           = vma;
469d8414d3cSBastian Blank 	state.va            = m.addr;
470d71f5139SMukesh Rathor 	state.index         = 0;
471ceb90fa0SAndres Lagar-Cavilla 	state.global_error  = 0;
47299beae6cSAndres Lagar-Cavilla 	state.version       = version;
473d8414d3cSBastian Blank 
474ceb90fa0SAndres Lagar-Cavilla 	/* mmap_batch_fn guarantees ret == 0 */
475ceb90fa0SAndres Lagar-Cavilla 	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
476ceb90fa0SAndres Lagar-Cavilla 			     &pagelist, mmap_batch_fn, &state));
477d8414d3cSBastian Blank 
478d8414d3cSBastian Blank 	up_write(&mm->mmap_sem);
479d8414d3cSBastian Blank 
48068fa965dSMats Petersson 	if (state.global_error) {
481ceb90fa0SAndres Lagar-Cavilla 		/* Write back errors in second pass. */
482ceb90fa0SAndres Lagar-Cavilla 		state.user_mfn = (xen_pfn_t *)m.arr;
48399beae6cSAndres Lagar-Cavilla 		state.user_err = m.err;
484d8414d3cSBastian Blank 		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
48599beae6cSAndres Lagar-Cavilla 							 &pagelist, mmap_return_errors, &state);
48668fa965dSMats Petersson 	} else
48768fa965dSMats Petersson 		ret = 0;
48868fa965dSMats Petersson 
489ceb90fa0SAndres Lagar-Cavilla 	/* If we have not had any EFAULT-like global errors then set the global
490ceb90fa0SAndres Lagar-Cavilla 	 * error to -ENOENT if necessary. */
491ceb90fa0SAndres Lagar-Cavilla 	if ((ret == 0) && (state.global_error == -ENOENT))
492ceb90fa0SAndres Lagar-Cavilla 		ret = -ENOENT;
493d8414d3cSBastian Blank 
494d8414d3cSBastian Blank out:
495d8414d3cSBastian Blank 	free_page_list(&pagelist);
496d8414d3cSBastian Blank 	return ret;
497a5deabe0SAndres Lagar-Cavilla 
498a5deabe0SAndres Lagar-Cavilla out_unlock:
499a5deabe0SAndres Lagar-Cavilla 	up_write(&mm->mmap_sem);
500a5deabe0SAndres Lagar-Cavilla 	goto out;
501d8414d3cSBastian Blank }
502d8414d3cSBastian Blank 
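/*
 * Editor's addition -- a hedged userspace sketch, not part of the driver:
 * a V2 batch mapping of 'num' foreign frames, assuming the uapi structures
 * from <xen/privcmd.h>; fd, domid, gfns[] and errs[] are caller-supplied
 * placeholders.  The first request must cover the whole mmap()ed VMA
 * (enforced above); frames reported as -ENOENT were paged out and only
 * those holes may be retried, see privcmd_vma_range_is_mapped() below.
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	void *va = mmap(NULL, num * pg, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = num, .dom = domid,
 *		.addr = (__u64)(unsigned long)va,
 *		.arr = gfns, .err = errs,
 *	};
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *	// rc == -1 with errno == ENOENT: some frames were paged out,
 *	// inspect errs[] to see which ones.
 */
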
503d8414d3cSBastian Blank static long privcmd_ioctl(struct file *file,
504d8414d3cSBastian Blank 			  unsigned int cmd, unsigned long data)
505d8414d3cSBastian Blank {
506d8414d3cSBastian Blank 	int ret = -ENOSYS;
507d8414d3cSBastian Blank 	void __user *udata = (void __user *) data;
508d8414d3cSBastian Blank 
509d8414d3cSBastian Blank 	switch (cmd) {
510d8414d3cSBastian Blank 	case IOCTL_PRIVCMD_HYPERCALL:
511d8414d3cSBastian Blank 		ret = privcmd_ioctl_hypercall(udata);
512d8414d3cSBastian Blank 		break;
513d8414d3cSBastian Blank 
514d8414d3cSBastian Blank 	case IOCTL_PRIVCMD_MMAP:
515d8414d3cSBastian Blank 		ret = privcmd_ioctl_mmap(udata);
516d8414d3cSBastian Blank 		break;
517d8414d3cSBastian Blank 
518d8414d3cSBastian Blank 	case IOCTL_PRIVCMD_MMAPBATCH:
519ceb90fa0SAndres Lagar-Cavilla 		ret = privcmd_ioctl_mmap_batch(udata, 1);
520ceb90fa0SAndres Lagar-Cavilla 		break;
521ceb90fa0SAndres Lagar-Cavilla 
522ceb90fa0SAndres Lagar-Cavilla 	case IOCTL_PRIVCMD_MMAPBATCH_V2:
523ceb90fa0SAndres Lagar-Cavilla 		ret = privcmd_ioctl_mmap_batch(udata, 2);
524d8414d3cSBastian Blank 		break;
525d8414d3cSBastian Blank 
526d8414d3cSBastian Blank 	default:
527d8414d3cSBastian Blank 		ret = -EINVAL;
528d8414d3cSBastian Blank 		break;
529d8414d3cSBastian Blank 	}
530d8414d3cSBastian Blank 
531d8414d3cSBastian Blank 	return ret;
532d8414d3cSBastian Blank }
533d8414d3cSBastian Blank 
534d71f5139SMukesh Rathor static void privcmd_close(struct vm_area_struct *vma)
535d71f5139SMukesh Rathor {
536d71f5139SMukesh Rathor 	struct page **pages = vma->vm_private_data;
537d71f5139SMukesh Rathor 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
538b6497b38SIan Campbell 	int rc;
539d71f5139SMukesh Rathor 
5409eff37a8SDan Carpenter 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
541d71f5139SMukesh Rathor 		return;
542d71f5139SMukesh Rathor 
543b6497b38SIan Campbell 	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
544b6497b38SIan Campbell 	if (rc == 0)
545d71f5139SMukesh Rathor 		free_xenballooned_pages(numpgs, pages);
546b6497b38SIan Campbell 	else
547b6497b38SIan Campbell 		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
548b6497b38SIan Campbell 			numpgs, rc);
549d71f5139SMukesh Rathor 	kfree(pages);
550d71f5139SMukesh Rathor }
551d71f5139SMukesh Rathor 
552d8414d3cSBastian Blank static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
553d8414d3cSBastian Blank {
554d8414d3cSBastian Blank 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
555d8414d3cSBastian Blank 	       vma, vma->vm_start, vma->vm_end,
556d8414d3cSBastian Blank 	       vmf->pgoff, vmf->virtual_address);
557d8414d3cSBastian Blank 
558d8414d3cSBastian Blank 	return VM_FAULT_SIGBUS;
559d8414d3cSBastian Blank }
560d8414d3cSBastian Blank 
561d8414d3cSBastian Blank static struct vm_operations_struct privcmd_vm_ops = {
562d71f5139SMukesh Rathor 	.close = privcmd_close,
563d8414d3cSBastian Blank 	.fault = privcmd_fault
564d8414d3cSBastian Blank };
565d8414d3cSBastian Blank 
566d8414d3cSBastian Blank static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
567d8414d3cSBastian Blank {
568d8414d3cSBastian Blank 	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
569d8414d3cSBastian Blank 	 * how to recreate these mappings. */
570314e51b9SKonstantin Khlebnikov 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
571314e51b9SKonstantin Khlebnikov 			 VM_DONTEXPAND | VM_DONTDUMP;
572d8414d3cSBastian Blank 	vma->vm_ops = &privcmd_vm_ops;
573d8414d3cSBastian Blank 	vma->vm_private_data = NULL;
574d8414d3cSBastian Blank 
575d8414d3cSBastian Blank 	return 0;
576d8414d3cSBastian Blank }
577d8414d3cSBastian Blank 
578a5deabe0SAndres Lagar-Cavilla /*
579a5deabe0SAndres Lagar-Cavilla  * For MMAPBATCH*. This allows asserting the singleshot mapping
580a5deabe0SAndres Lagar-Cavilla  * on a per-pfn/pte basis. Mapping calls that fail with ENOENT
581a5deabe0SAndres Lagar-Cavilla  * can then be retried until success.
582a5deabe0SAndres Lagar-Cavilla  */
583a5deabe0SAndres Lagar-Cavilla static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
584a5deabe0SAndres Lagar-Cavilla 	                unsigned long addr, void *data)
585d8414d3cSBastian Blank {
586a5deabe0SAndres Lagar-Cavilla 	return pte_none(*pte) ? 0 : -EBUSY;
587a5deabe0SAndres Lagar-Cavilla }
588a5deabe0SAndres Lagar-Cavilla 
589a5deabe0SAndres Lagar-Cavilla static int privcmd_vma_range_is_mapped(
590a5deabe0SAndres Lagar-Cavilla 	           struct vm_area_struct *vma,
591a5deabe0SAndres Lagar-Cavilla 	           unsigned long addr,
592a5deabe0SAndres Lagar-Cavilla 	           unsigned long nr_pages)
593a5deabe0SAndres Lagar-Cavilla {
594a5deabe0SAndres Lagar-Cavilla 	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
595a5deabe0SAndres Lagar-Cavilla 				   is_mapped_fn, NULL) != 0;
596d8414d3cSBastian Blank }
597d8414d3cSBastian Blank 
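/*
 * Editor's note (illustrative, not from the original source): because the
 * check above rejects any range that already contains a populated pte, a
 * retry after -ENOENT must cover only the still-unmapped frames.  Continuing
 * the hypothetical V2 sketch above, retrying a single paged-out frame i
 * might look like:
 *
 *	struct privcmd_mmapbatch_v2 again = {
 *		.num = 1, .dom = domid,
 *		.addr = (__u64)(unsigned long)va + i * pg,
 *		.arr = &gfns[i], .err = &errs[i],
 *	};
 *	rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &again);
 */
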
598d8414d3cSBastian Blank const struct file_operations xen_privcmd_fops = {
599d8414d3cSBastian Blank 	.owner = THIS_MODULE,
600d8414d3cSBastian Blank 	.unlocked_ioctl = privcmd_ioctl,
601d8414d3cSBastian Blank 	.mmap = privcmd_mmap,
602d8414d3cSBastian Blank };
603d8414d3cSBastian Blank EXPORT_SYMBOL_GPL(xen_privcmd_fops);
604d8414d3cSBastian Blank 
605d8414d3cSBastian Blank static struct miscdevice privcmd_dev = {
606d8414d3cSBastian Blank 	.minor = MISC_DYNAMIC_MINOR,
607d8414d3cSBastian Blank 	.name = "xen/privcmd",
608d8414d3cSBastian Blank 	.fops = &xen_privcmd_fops,
609d8414d3cSBastian Blank };
610d8414d3cSBastian Blank 
611d8414d3cSBastian Blank static int __init privcmd_init(void)
612d8414d3cSBastian Blank {
613d8414d3cSBastian Blank 	int err;
614d8414d3cSBastian Blank 
615d8414d3cSBastian Blank 	if (!xen_domain())
616d8414d3cSBastian Blank 		return -ENODEV;
617d8414d3cSBastian Blank 
618d8414d3cSBastian Blank 	err = misc_register(&privcmd_dev);
619d8414d3cSBastian Blank 	if (err != 0) {
620283c0972SJoe Perches 		pr_err("Could not register Xen privcmd device\n");
621d8414d3cSBastian Blank 		return err;
622d8414d3cSBastian Blank 	}
623d8414d3cSBastian Blank 	return 0;
624d8414d3cSBastian Blank }
625d8414d3cSBastian Blank 
626d8414d3cSBastian Blank static void __exit privcmd_exit(void)
627d8414d3cSBastian Blank {
628d8414d3cSBastian Blank 	misc_deregister(&privcmd_dev);
629d8414d3cSBastian Blank }
630d8414d3cSBastian Blank 
631d8414d3cSBastian Blank module_init(privcmd_init);
632d8414d3cSBastian Blank module_exit(privcmd_exit);
633