/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
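
/*
 * _PAGE_NEWPAGE and _PAGE_NEWPROT are software bits specific to UML.
 * Roughly: _PAGE_NEWPAGE marks a pte whose page still has to be
 * mapped (or unmapped) in the host address space, and _PAGE_NEWPROT
 * marks a present pte whose host protections still have to be updated
 * with mprotect().  The TLB flush code consumes and clears them; this
 * is also why pte_clear() and pmd_clear() below store _PAGE_NEWPAGE
 * rather than plain zero.
 */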

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 */
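
/*
 * A worked example of the VMALLOC_START rounding below (a sketch,
 * assuming the 8MB VMALLOC_OFFSET mentioned above, which must be a
 * power of two for the mask to work): with end_iomem == 0x3ca71000,
 *
 *	(0x3ca71000 + 0x800000) & ~(0x800000 - 1) == 0x3d000000
 *
 * i.e. the sum is rounded down to an 8MB boundary, leaving a hole of
 * at most 8MB after end_iomem.
 */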

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

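/*
 * Permission changes do not take effect on the host side immediately:
 * pte_wrprotect() and friends only set _PAGE_NEWPROT via
 * pte_mknewprot(), and the next TLB flush is expected to replay the
 * new protections with mprotect() on the host mapping.
 */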
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (likely(pte_get_bits(pte, _PAGE_RW)))
		pte_clear_bits(pte, _PAGE_RW);
	else
		return pte;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
		return pte;
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
		return pte;
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}

#define PFN_PTE_SHIFT		PAGE_SHIFT

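/*
 * _PAGE_NEWPAGE is masked out of the comparison below: it only tracks
 * pending host-side updates, so two ptes that differ in nothing but
 * that bit are presumably meant to be treated as the same mapping.
 */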
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte_mknewprot(pte_mknewpage(pte));	\
	pte;})
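
/*
 * Typical use (a sketch, not taken from a caller in this tree): given
 * a pte_t *ptep, build a pte for a freshly allocated page and install
 * it, so that the next TLB flush maps it in the host:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	set_pte(ptep, pte);
 */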

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * pmd_page_vaddr() returns the kernel virtual address of the page
 * table that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)			(((x).val >> 5) & 0x1f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
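
/*
 * A worked example of the encoding above (a sketch): for swap type 3,
 * offset 0x1234,
 *
 *	__swp_entry(3, 0x1234).val == (3 << 5) | (0x1234 << 11)
 *				   == 0x60 | 0x91a000 == 0x91a060
 *
 * Bits 0-4 stay clear, so the pte is !pte_present(); set_pte() then
 * sets _PAGE_NEWPAGE (bit 1) so the flush code knows to unmap it.
 */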

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif