/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H

#include <linux/highmem.h>

#include <xen/features.h>
#include <xen/page.h>

extern bool xen_scrub_pages;

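/*
 * Scrub a page about to be released to the hypervisor, so that its
 * previous contents cannot leak to other domains. This is a no-op
 * when scrubbing has been disabled via the xen_scrub_pages parameter.
 */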
static inline void xenmem_reservation_scrub_page(struct page *page)
{
	if (xen_scrub_pages)
		clear_highpage(page);
}

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages);
#endif

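/*
 * On PV guests (no auto-translated physmap) the kernel must update its
 * virtual address mappings and P2M entries whenever the frames backing
 * a set of pages change. On auto-translated guests, and on kernels
 * built without CONFIG_XEN_HAVE_PVMMU, this wrapper is a no-op.
 */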
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							 struct page **pages,
							 xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}

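/*
 * Counterpart of xenmem_reservation_va_mapping_update(): tear down the
 * PV mappings of @pages before their backing frames are given back to
 * the hypervisor. A no-op on auto-translated guests.
 */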
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
							struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}

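/*
 * Ask the hypervisor to populate @count frames (XENMEM_populate_physmap).
 * Returns the number of frames actually populated, which may be fewer
 * than requested.
 */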
int xenmem_reservation_increase(int count, xen_pfn_t *frames);

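/*
 * Hand @count frames back to the hypervisor (XENMEM_decrease_reservation).
 * Returns the number of frames actually released.
 */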
int xenmem_reservation_decrease(int count, xen_pfn_t *frames);

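/*
 * Illustrative sketch (not part of this header): how a balloon-style
 * caller might release nr_pages pages back to Xen. "pages" and "frames"
 * are caller-provided arrays; error handling is omitted for brevity.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		xenmem_reservation_scrub_page(pages[i]);
 *		frames[i] = xen_page_to_gfn(pages[i]);
 *	}
 *	xenmem_reservation_va_mapping_reset(nr_pages, pages);
 *	ret = xenmem_reservation_decrease(nr_pages, frames);
 *
 * The reverse path would call xenmem_reservation_increase() and then
 * xenmem_reservation_va_mapping_update() with the repopulated frames.
 */
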
#endif /* _XENMEM_RESERVATION_H */