xref: /openbmc/linux/drivers/xen/mem-reservation.c (revision d99bb72a)
1ae4c51a5SOleksandr Andrushchenko // SPDX-License-Identifier: GPL-2.0
2ae4c51a5SOleksandr Andrushchenko 
3ae4c51a5SOleksandr Andrushchenko /******************************************************************************
4ae4c51a5SOleksandr Andrushchenko  * Xen memory reservation utilities.
5ae4c51a5SOleksandr Andrushchenko  *
6ae4c51a5SOleksandr Andrushchenko  * Copyright (c) 2003, B Dragovic
7ae4c51a5SOleksandr Andrushchenko  * Copyright (c) 2003-2004, M Williamson, K Fraser
8ae4c51a5SOleksandr Andrushchenko  * Copyright (c) 2005 Dan M. Smith, IBM Corporation
9ae4c51a5SOleksandr Andrushchenko  * Copyright (c) 2010 Daniel Kiper
10ae4c51a5SOleksandr Andrushchenko  * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
11ae4c51a5SOleksandr Andrushchenko  */
12ae4c51a5SOleksandr Andrushchenko 
13ae4c51a5SOleksandr Andrushchenko #include <asm/xen/hypercall.h>
14ae4c51a5SOleksandr Andrushchenko 
15ae4c51a5SOleksandr Andrushchenko #include <xen/interface/memory.h>
16ae4c51a5SOleksandr Andrushchenko #include <xen/mem-reservation.h>
17197ecb38SMarek Marczykowski-Górecki #include <linux/moduleparam.h>
18197ecb38SMarek Marczykowski-Górecki 
/*
 * When true, pages are scrubbed (zeroed) before being handed back to the
 * hypervisor — presumably to avoid leaking guest data; consumers are
 * elsewhere in the tree (see xenmem_reservation_scrub_page()).
 * Tunable at boot via the "xen_scrub_pages" core kernel parameter;
 * default comes from CONFIG_XEN_SCRUB_PAGES_DEFAULT.
 */
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
21ae4c51a5SOleksandr Andrushchenko 
/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
26ae4c51a5SOleksandr Andrushchenko #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
27ae4c51a5SOleksandr Andrushchenko 
28ae4c51a5SOleksandr Andrushchenko #ifdef CONFIG_XEN_HAVE_PVMMU
/*
 * Re-establish the P2M entries and kernel direct-map PTEs for @count pages
 * whose new machine frames are given in @frames (one frame per page; see
 * EXTENT_ORDER).  Any failure is fatal (BUG_ON) — callers rely on the
 * mapping being valid on return.
 */
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn;
		int ret;

		/*
		 * Validate the entry *before* page_to_pfn() consumes it;
		 * previously the check came after the use and could never
		 * trip first.
		 */
		BUG_ON(!page);
		pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		/* Record the new PFN -> MFN translation. */
		set_phys_to_machine(pfn, frames[i]);

		/* Point the direct-map virtual address at the new frame. */
		ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frames[i], PAGE_KERNEL), 0);
		BUG_ON(ret);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
57ae4c51a5SOleksandr Andrushchenko 
__xenmem_reservation_va_mapping_reset(unsigned long count,struct page ** pages)58ae4c51a5SOleksandr Andrushchenko void __xenmem_reservation_va_mapping_reset(unsigned long count,
59ae4c51a5SOleksandr Andrushchenko 					   struct page **pages)
60ae4c51a5SOleksandr Andrushchenko {
61ae4c51a5SOleksandr Andrushchenko 	int i;
62ae4c51a5SOleksandr Andrushchenko 
63ae4c51a5SOleksandr Andrushchenko 	for (i = 0; i < count; i++) {
64ae4c51a5SOleksandr Andrushchenko 		struct page *page = pages[i];
65ae4c51a5SOleksandr Andrushchenko 		unsigned long pfn = page_to_pfn(page);
66*d99bb72aSJuergen Gross 		int ret;
67ae4c51a5SOleksandr Andrushchenko 
68ae4c51a5SOleksandr Andrushchenko 		/*
69ae4c51a5SOleksandr Andrushchenko 		 * We don't support PV MMU when Linux and Xen are using
70ae4c51a5SOleksandr Andrushchenko 		 * different page granularity.
71ae4c51a5SOleksandr Andrushchenko 		 */
72ae4c51a5SOleksandr Andrushchenko 		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
73ae4c51a5SOleksandr Andrushchenko 
74ae4c51a5SOleksandr Andrushchenko 		ret = HYPERVISOR_update_va_mapping(
75ae4c51a5SOleksandr Andrushchenko 				(unsigned long)__va(pfn << PAGE_SHIFT),
76ae4c51a5SOleksandr Andrushchenko 				__pte_ma(0), 0);
77ae4c51a5SOleksandr Andrushchenko 		BUG_ON(ret);
78*d99bb72aSJuergen Gross 
79ae4c51a5SOleksandr Andrushchenko 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
80ae4c51a5SOleksandr Andrushchenko 	}
81ae4c51a5SOleksandr Andrushchenko }
82ae4c51a5SOleksandr Andrushchenko EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
83ae4c51a5SOleksandr Andrushchenko #endif /* CONFIG_XEN_HAVE_PVMMU */
84ae4c51a5SOleksandr Andrushchenko 
85ae4c51a5SOleksandr Andrushchenko /* @frames is an array of PFNs */
/*
 * Ask Xen to populate @count frames, identified by the PFNs in @frames,
 * into this domain's physmap.  Returns the hypercall result (number of
 * extents populated, or a negative error).
 */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF,
		.nr_extents   = count
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);

	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
100ae4c51a5SOleksandr Andrushchenko 
101ae4c51a5SOleksandr Andrushchenko /* @frames is an array of GFNs */
/*
 * Hand @count frames, identified by the GFNs in @frames, back to Xen.
 * Returns the hypercall result (number of extents released, or a
 * negative error).
 */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF,
		.nr_extents   = count
	};

	/* XENMEM_decrease_reservation requires a GFN */
	set_xen_guest_handle(reservation.extent_start, frames);

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
116