xref: /openbmc/linux/arch/mips/include/asm/pgtable.h (revision 15fa3e8e)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
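
/*
 * The three PAGE_KERNEL variants above differ only in their cache
 * attribute field: the boot-time default, explicitly non-coherent
 * cacheable, and uncached.  The permission bits are identical.
 */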

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, so we consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
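
/*
 * On CPUs with cache aliases, empty_zero_page spans several pages and
 * zero_page_mask selects the copy whose cache colour matches vaddr.
 * Illustrative example only (the actual values depend on the cache
 * configuration): with 4K pages and zero_page_mask == 0x3000,
 * ZERO_PAGE(0x5000) maps to the zero page at empty_zero_page + 0x1000.
 */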

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PFN_PTE_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)
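
/*
 * htw_stop()/htw_start() bracket page table updates: the outermost
 * htw_stop() disables the hardware page table walker and the matching
 * htw_start() re-enables it, with htw_seq counting nested pairs.
 * Expected usage (illustrative sketch only):
 *
 *	htw_stop();
 *	... modify the page tables ...
 *	htw_start();
 */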

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte(ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}
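
/*
 * Each even/odd pair of PTEs shares a single TLB entry on MIPS, so the
 * global bit must agree across the pair.  The cmpxchg() above only
 * upgrades a still-none buddy (0 -> _PAGE_GLOBAL), so it cannot clobber
 * a buddy that a concurrent set_pte() has already populated.
 */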

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;
	bool do_sync = false;

	for (i = 0; i < nr; i++) {
		if (!pte_present(pte))
			continue;
		if (pte_present(ptep[i]) &&
		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
			continue;
		do_sync = true;
	}

	if (do_sync)
		__update_cache(addr, pte);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}
#define set_ptes set_ptes
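
/*
 * set_ptes() installs nr consecutive entries: the first pass decides
 * whether a cache sync is needed before anything is written, the second
 * pass writes the PTEs, stepping the PFN by one page each time.
 * Hedged usage sketch (callers normally hold the page table lock):
 *
 *	set_ptes(mm, addr, ptep, mk_pte(page, prot), nr);
 */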

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
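
/*
 * __builtin_ffs() returns the 1-based index of the lowest set bit, so
 * for a power-of-two sized type this computes log2 of the size: e.g.
 * sizeof(pte_t) == 8 yields PTE_T_LOG2 == 3.
 */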

/*
 * We used to declare this array with a size, but gcc 3.3 and older are
 * not able to see that this expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform-specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
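
/*
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE are the bits the TLB
 * actually honours (valid and dirty); they are only set once the
 * software ACCESSED/MODIFIED bits permit it, which is how MIPS
 * emulates hardware accessed/dirty tracking in software.
 */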

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
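
/*
 * A huge pmd encodes the output PFN directly, while a normal pmd holds
 * the kernel virtual address of its PTE page; hence the two different
 * conversions in pmd_page() above.
 */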

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
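
/*
 * Both helpers replace only the cache attribute field and leave the
 * permission bits alone.  Hedged example of typical driver usage
 * (names are illustrative):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */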

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif
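
/*
 * pte_modify() keeps the PFN, cache attribute and software state bits
 * (the _PAGE_CHG_MASK part) while replacing the protection bits; this
 * is what lets mprotect() change access rights on an existing mapping
 * without losing where it points.
 */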

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		pte_t pte = *ptep;
		__update_tlb(vma, address, pte);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
#define update_mmu_cache(vma, address, ptep) \
	update_mmu_cache_range(NULL, vma, address, ptep, 1)
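
/*
 * update_mmu_cache_range() refreshes the TLB for each of the nr PTEs
 * just installed (typically by set_ptes()); update_mmu_cache() is the
 * single-page special case the generic MM code expects.
 */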

#define	__HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear() uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
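
/* Fast GUP is only permitted when the D-cache cannot alias. */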

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */