/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MM_INTERNAL_H
#define __X86_MM_INTERNAL_H

/*
 * Internal declarations shared between the x86 mm implementation files.
 * Everything here is defined elsewhere in arch/x86/mm; this header only
 * makes the symbols visible across those translation units.
 */

/*
 * Allocate @num pages for early page-table use.
 * NOTE(review): exact allocation source/semantics live in the definition,
 * which is not visible from this header — confirm there.
 */
void *alloc_low_pages(unsigned int num);

/* Convenience wrapper: allocate exactly one page via alloc_low_pages(). */
static inline void *alloc_low_page(void)
{
	return alloc_low_pages(1);
}

void early_ioremap_page_table_range_init(void);

/*
 * Establish the kernel's physical memory mapping for [start, end).
 * @page_size_mask selects which large-page sizes may be used;
 * @prot is the protection applied to the mapping.
 * NOTE(review): parameter semantics inferred from names — verify against
 * the definition before relying on them.
 */
unsigned long kernel_physical_mapping_init(unsigned long start,
					   unsigned long end,
					   unsigned long page_size_mask,
					   pgprot_t prot);

/* Change an existing physical mapping over [start, end). */
unsigned long kernel_physical_mapping_change(unsigned long start,
					     unsigned long end,
					     unsigned long page_size_mask);

void zone_sizes_init(void);

/* Flag set once early (bootmem) allocation is no longer in use. */
extern int after_bootmem;

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);

/* Tuning knob: above this many pages, flush the whole TLB instead of
 * flushing page-by-page. */
extern unsigned long tlb_single_page_flush_ceiling;

#endif /* __X86_MM_INTERNAL_H */