/*
 * MMU operations common to all auto-translated physmap guests.
 *
 * Copyright (C) 2015 Citrix Systems R&D Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>

typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);

/* Break down the pages into 4KB chunks and call fn for each gfn */
static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
			     xen_gfn_fn_t fn, void *data)
{
	unsigned long xen_pfn = 0;
	struct page *page;
	int i;

	for (i = 0; i < nr_gfn; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			page = pages[i / XEN_PFN_PER_PAGE];
			xen_pfn = page_to_xen_pfn(page);
		}
		fn(pfn_to_gfn(xen_pfn++), data);
	}
}
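
/*
 * Illustrative sketch (not part of this file): a minimal xen_gfn_fn_t
 * callback in the style of setup_balloon_gfn() below. The helper walks
 * nr_gfn 4KB Xen frames, so a callback that simply collects each gfn
 * into a caller-provided array looks like this; the names collect_data
 * and collect_gfn are hypothetical.
 *
 *	struct collect_data {
 *		xen_pfn_t *gfns;
 *		unsigned int idx;
 *	};
 *
 *	static void collect_gfn(unsigned long gfn, void *data)
 *	{
 *		struct collect_data *c = data;
 *
 *		c->gfns[c->idx++] = gfn;
 *	}
 *
 *	// then: xen_for_each_gfn(pages, nr_gfn, collect_gfn, &c);
 */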

struct remap_data {
	xen_pfn_t *fgfn; /* foreign domain's gfn */
	int nr_fgfn; /* Number of foreign gfn left to map */
	pgprot_t prot;
	domid_t  domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_gfn_info *info;
	int *err_ptr;
	int mapped;

	/* Hypercall parameters */
	int h_errs[XEN_PFN_PER_PAGE];
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

	int h_iter;	/* Iterator */
};
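
/*
 * Sizing note (illustrative, not from this file): XEN_PFN_PER_PAGE is
 * PAGE_SIZE / XEN_PAGE_SIZE, so on a 64KB-page arm64 kernel it is
 * 65536 / 4096 = 16, and each remap_pte_fn() call below batches up to
 * 16 foreign gfns into one XENMEM_add_to_physmap_range hypercall; with
 * 4KB pages the batch size is 1.
 */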

static void setup_hparams(unsigned long gfn, void *data)
{
	struct remap_data *info = data;

	info->h_idxs[info->h_iter] = *info->fgfn;
	info->h_gpfns[info->h_iter] = gfn;
	info->h_errs[info->h_iter] = 0;

	info->h_iter++;
	info->fgfn++;
}

static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
	int rc, nr_gfn;
	uint32_t i;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = info->domid,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
	info->nr_fgfn -= nr_gfn;

	info->h_iter = 0;
	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
	BUG_ON(info->h_iter != nr_gfn);

	set_xen_guest_handle(xatp.idxs, info->h_idxs);
	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
	set_xen_guest_handle(xatp.errs, info->h_errs);
	xatp.size = nr_gfn;

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

	/* info->err_ptr expects one error status per Xen PFN */
	for (i = 0; i < nr_gfn; i++) {
		int err = (rc < 0) ? rc : info->h_errs[i];

		*(info->err_ptr++) = err;
		if (!err)
			info->mapped++;
	}

	/*
	 * Note: The hypercall will return 0 in most cases even if not
	 * all of the foreign gfns were mapped. We still have to update
	 * the pte, as userspace may decide to continue.
	 */
	if (!rc)
		set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned domid,
			      struct page **pages)
{
	int err;
	struct remap_data data;
	unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;

	/*
	 * Kept here for the purpose of making sure code doesn't break
	 * x86 PVOPS
	 */
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	data.fgfn = gfn;
	data.nr_fgfn = nr;
	data.prot  = prot;
	data.domid = domid;
	data.vma   = vma;
	data.pages = pages;
	data.index = 0;
	data.err_ptr = err_ptr;
	data.mapped = 0;

	err = apply_to_page_range(vma->vm_mm, addr, range,
				  remap_pte_fn, &data);
	return err < 0 ? err : data.mapped;
}
EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
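
/*
 * Illustrative caller sketch (an assumption, not the privcmd code
 * itself): the function expects a VM_IO | VM_PFNMAP vma and an err_ptr
 * array with one slot per foreign gfn. A positive return value is the
 * number of gfns actually mapped; a negative one is an errno from the
 * page-table walk.
 *
 *	int *errs = kcalloc(nr, sizeof(*errs), GFP_KERNEL);
 *	int mapped;
 *
 *	if (!errs)
 *		return -ENOMEM;
 *	mapped = xen_xlate_remap_gfn_array(vma, vma->vm_start, gfns, nr,
 *					   errs, vma->vm_page_prot,
 *					   domid, pages);
 *	if (mapped < 0)
 *		return mapped;
 *	// mapped < nr: inspect errs[i] for the gfns that failed
 */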

static void unmap_gfn(unsigned long gfn, void *data)
{
	struct xen_remove_from_physmap xrp;

	xrp.domid = DOMID_SELF;
	xrp.gpfn = gfn;
	(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
}

int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages)
{
	xen_for_each_gfn(pages, nr, unmap_gfn, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
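
/*
 * Illustrative teardown sketch (an assumption, not taken from a real
 * caller): the unmap is the inverse of xen_xlate_remap_gfn_array() and
 * takes the same page array; nr counts 4KB gfns while the page array
 * holds DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) struct pages, which the
 * caller can afterwards hand back to the pool they came from.
 *
 *	xen_xlate_unmap_gfn_range(vma, nr, pages);
 *	xen_free_unpopulated_pages(DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE),
 *				   pages);
 */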

struct map_balloon_pages {
	xen_pfn_t *pfns;
	unsigned int idx;
};

static void setup_balloon_gfn(unsigned long gfn, void *data)
{
	struct map_balloon_pages *info = data;

	info->pfns[info->idx++] = gfn;
}

/**
 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
 * @gfns: returns the array of corresponding GFNs
 * @virt: returns the virtual address of the mapped region
 * @nr_grant_frames: number of GFNs
 * @return 0 on success, error otherwise
 *
 * This allocates a set of ballooned pages and maps them into the
 * kernel's address space.
 */
int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
					 unsigned long nr_grant_frames)
{
	struct page **pages;
	xen_pfn_t *pfns;
	void *vaddr;
	struct map_balloon_pages data;
	int rc;
	unsigned long nr_pages;

	BUG_ON(nr_grant_frames == 0);
	nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
	pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
	if (!pfns) {
		kfree(pages);
		return -ENOMEM;
	}
	rc = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (rc) {
		pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
			nr_pages, rc);
		kfree(pages);
		kfree(pfns);
		return rc;
	}

	data.pfns = pfns;
	data.idx = 0;
	xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);

	vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
	if (!vaddr) {
		pr_warn("%s Couldn't map %ld pages\n", __func__, nr_pages);
		xen_free_unpopulated_pages(nr_pages, pages);
		kfree(pages);
		kfree(pfns);
		return -ENOMEM;
	}
	kfree(pages);

	*gfns = pfns;
	*virt = vaddr;

	return 0;
}
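
/*
 * Illustrative usage sketch (an assumption; the real caller is the
 * arch grant-table setup, which may differ): obtain nr_frames grant
 * frames backed by ballooned pages, virtually contiguous at *virt.
 *
 *	xen_pfn_t *gfns;
 *	void *virt;
 *	int rc;
 *
 *	rc = xen_xlate_map_ballooned_pages(&gfns, &virt, nr_frames);
 *	if (rc)
 *		return rc;
 *	// gfns[0..nr_frames-1] are the 4KB gfns backing virt
 */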

struct remap_pfn {
	struct mm_struct *mm;
	struct page **pages;
	pgprot_t prot;
	unsigned long i;
};

static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;
	struct page *page = r->pages[r->i];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));

	set_pte_at(r->mm, addr, ptep, pte);
	r->i++;

	return 0;
}

/* Used by the privcmd module, but has to be built-in on ARM */
int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.pages = vma->vm_private_data,
		.prot = vma->vm_page_prot,
	};

	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
}
EXPORT_SYMBOL_GPL(xen_remap_vma_range);
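
/*
 * Illustrative caller sketch (an assumption, not the privcmd code):
 * the helper pulls its page array from vm_private_data, so a caller
 * stashes it there before remapping, e.g. in an mmap handler:
 *
 *	vma->vm_private_data = pages;
 *	rc = xen_remap_vma_range(vma, vma->vm_start,
 *				 vma->vm_end - vma->vm_start);
 */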