/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))
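
/*
 * Each table occupies one page of 8-byte entries, so every level
 * resolves PAGE_SHIFT - 3 bits of virtual address. A worked example
 * (assuming the default 16KB pages, i.e. PAGE_SHIFT == 14, and 3
 * levels): PMD_SHIFT = 14 + 11 = 25, PGDIR_SHIFT = 25 + 11 = 36,
 * and VA_BITS = 36 + 11 = 47.
 */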

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

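/* Number of PGD entries covering user space (at least one). */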
#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
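
/*
 * Resulting layout above vm_map_base, as derived from the definitions
 * above: PCI I/O ports, two guard pages, 256MB of modules, vmalloc,
 * a PMD-aligned vmemmap, then the KFENCE pool at the top. Under KASAN
 * only half of the cpu_vabits span is used, presumably to leave room
 * for the KASAN shadow.
 */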

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
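
/*
 * A worked example of the encoding above: __swp_entry(1, 2) builds
 * the value (1UL << 16) | (2UL << 24), from which __swp_type()
 * recovers 1 and __swp_offset() recovers 2. The low 16 bits stay
 * zero, so the result is !pte_none() (non-zero outside _PAGE_GLOBAL)
 * and !pte_present() (_PAGE_PRESENT and _PAGE_PROTNONE are among the
 * zeroed bits).
 */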

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
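
/*
 * Note that pte_none() masks out _PAGE_GLOBAL: pte_clear() below may
 * leave the global bit set in an otherwise cleared PTE to keep it in
 * sync with its buddy, and such a PTE must still count as none.
 */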

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}
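
/*
 * Background for the buddy handling above: a TLB entry covers an
 * adjacent even/odd pair of pages that share a single global bit, so
 * both PTEs of a pair must agree on _PAGE_GLOBAL. The LL/SC loop
 * propagates the bit to a still-clear buddy atomically: if the buddy
 * is already non-zero it is left alone, and a failed SC (tmp == 0)
 * retries from the LL.
 */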

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte(ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
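
/*
 * These are log2 of the table entry sizes: with 8-byte entries,
 * __builtin_ffs(8) - 1 == 3, so shifting an index left by these
 * values turns it into a byte offset within the table.
 */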

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
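
/*
 * Dirty state is tracked with two bits: _PAGE_DIRTY is the D bit that
 * the TLB checks to permit writes (set by software on a write fault,
 * as the hardware never updates page tables itself), while
 * _PAGE_MODIFIED is a pure software bit recording that the page has
 * ever been written. pte_mkdirty() sets D only for writable PTEs and
 * pte_mkwrite_novma() restores it from _PAGE_MODIFIED, so
 * pte_wrprotect() can clear D without losing dirty state.
 */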

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
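
/*
 * Note on pmd_mkhuge() above: in a huge PMD the global bit lives at
 * _PAGE_HGLOBAL_SHIFT rather than _PAGE_GLOBAL_SHIFT, so an already
 * present global bit is relocated before _PAGE_HUGE is set.
 */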

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
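
/*
 * pmd_mkinvalid() above is used when a huge mapping is being split:
 * access is revoked (_PAGE_PRESENT and _PAGE_VALID cleared), but
 * _PAGE_PRESENT_INVALID keeps pmd_present() true so the entry is not
 * mistaken for a none/swap PMD in the meantime.
 */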

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */